net: Make napi_hash_lock irq safe
author Joe Damato <jdamato@fastly.com>
Mon, 2 Dec 2024 18:21:02 +0000 (18:21 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Wed, 4 Dec 2024 02:25:33 +0000 (18:25 -0800)
Make napi_hash_lock IRQ safe. It is used during the control path, and is
taken and released in napi_hash_add and napi_hash_del, which are
typically called via napi_enable and napi_disable.

This change avoids a deadlock in pcnet32 (and any other drivers which
follow the same pattern):

 CPU 0:
 pcnet32_open
    spin_lock_irqsave(&lp->lock, ...)
      napi_enable
        napi_hash_add <- before this executes, CPU 1 proceeds
          spin_lock(napi_hash_lock)
       [...]
    spin_unlock_irqrestore(&lp->lock, flags);

 CPU 1:
   pcnet32_close
     napi_disable
       napi_hash_del
         spin_lock(napi_hash_lock)
          < INTERRUPT >
            pcnet32_interrupt
              spin_lock(lp->lock) <- DEADLOCK

Making napi_hash_lock IRQ safe keeps the IRQ from firing on CPU 1 until
napi_hash_lock is released, which avoids the deadlock.
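
For illustration, a minimal hypothetical sketch of the driver-side
pattern the trace above describes (struct example_private, example_open,
example_close and example_interrupt are made-up names; only the locking
pattern mirrors pcnet32's open/close/interrupt paths):

/* The driver's private lock is held with IRQs disabled around
 * napi_enable(), and the same lock is taken from the interrupt
 * handler, so every lock reachable from napi_enable()/napi_disable()
 * (such as napi_hash_lock) must itself be IRQ safe.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_private {
        spinlock_t lock;                /* also taken in the IRQ handler */
        struct napi_struct napi;
};

static int example_open(struct net_device *dev)
{
        struct example_private *lp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);
        napi_enable(&lp->napi);         /* -> napi_hash_add() -> napi_hash_lock */
        /* ... program the hardware under lp->lock ... */
        spin_unlock_irqrestore(&lp->lock, flags);
        return 0;
}

static int example_close(struct net_device *dev)
{
        struct example_private *lp = netdev_priv(dev);

        napi_disable(&lp->napi);        /* -> napi_hash_del() -> napi_hash_lock;
                                         * before this fix the device IRQ could
                                         * fire while the hash lock was held */
        return 0;
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct example_private *lp = netdev_priv(dev);

        /* Pre-fix, this could run on a CPU that was interrupted while
         * holding napi_hash_lock (in napi_hash_del()), while another
         * CPU held lp->lock and spun on napi_hash_lock: AB-BA deadlock.
         */
        spin_lock(&lp->lock);
        /* ... ack and handle the device interrupt ... */
        spin_unlock(&lp->lock);
        return IRQ_HANDLED;
}

With napi_hash_lock taken via spin_lock_irqsave() (the diff below), the
CPU running napi_hash_del() keeps local IRQs off while holding the hash
lock, so example_interrupt() cannot run there and the cycle cannot form.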

Cc: stable@vger.kernel.org
Fixes: 86e25f40aa1e ("net: napi: Add napi_config")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Closes: https://lore.kernel.org/netdev/85dd4590-ea6b-427d-876a-1d8559c7ad82@roeck-us.net/
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20241202182103.363038-1-jdamato@fastly.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/dev.c

index 13d00fc10f55998077cb643a2f6e3c171974589d..45a8c3dd4a64839c403dec5e3f763dfa2f591415 100644
@@ -6557,18 +6557,22 @@ static void __napi_hash_add_with_id(struct napi_struct *napi,
 static void napi_hash_add_with_id(struct napi_struct *napi,
                                  unsigned int napi_id)
 {
-       spin_lock(&napi_hash_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&napi_hash_lock, flags);
        WARN_ON_ONCE(napi_by_id(napi_id));
        __napi_hash_add_with_id(napi, napi_id);
-       spin_unlock(&napi_hash_lock);
+       spin_unlock_irqrestore(&napi_hash_lock, flags);
 }
 
 static void napi_hash_add(struct napi_struct *napi)
 {
+       unsigned long flags;
+
        if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
                return;
 
-       spin_lock(&napi_hash_lock);
+       spin_lock_irqsave(&napi_hash_lock, flags);
 
        /* 0..NR_CPUS range is reserved for sender_cpu use */
        do {
@@ -6578,7 +6582,7 @@ static void napi_hash_add(struct napi_struct *napi)
 
        __napi_hash_add_with_id(napi, napi_gen_id);
 
-       spin_unlock(&napi_hash_lock);
+       spin_unlock_irqrestore(&napi_hash_lock, flags);
 }
 
 /* Warning : caller is responsible to make sure rcu grace period
@@ -6586,11 +6590,13 @@ static void napi_hash_add(struct napi_struct *napi)
  */
 static void napi_hash_del(struct napi_struct *napi)
 {
-       spin_lock(&napi_hash_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&napi_hash_lock, flags);
 
        hlist_del_init_rcu(&napi->napi_hash_node);
 
-       spin_unlock(&napi_hash_lock);
+       spin_unlock_irqrestore(&napi_hash_lock, flags);
 }
 
 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)