net: disable netpoll on fresh napis
author    Jakub Kicinski <kuba@kernel.org>
Wed, 26 Aug 2020 19:40:06 +0000 (12:40 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 12 Sep 2020 12:18:55 +0000 (14:18 +0200)
[ Upstream commit 96e97bc07e90f175a8980a22827faf702ca4cb30 ]

napi_disable() makes sure to set the NAPI_STATE_NPSVC bit to prevent
netpoll from accessing rings before init is complete. However, the
same is not done for fresh napi instances in netif_napi_add(),
even though we expect NAPI instances to be added as disabled.
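
For context, napi_enable() in the kernels this patch targets clears both
bits again, which is why adding the napi with NPSVC already set keeps it
invisible to netpoll until the driver is ready. The sketch below
paraphrases the include/linux/netdevice.h definition of that era; the
exact body may differ between versions:

	static inline void napi_enable(struct napi_struct *n)
	{
		BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
		smp_mb__before_atomic();
		clear_bit(NAPI_STATE_SCHED, &n->state);
		/* cleared last: from here on netpoll may service this napi */
		clear_bit(NAPI_STATE_NPSVC, &n->state);
	}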

This causes crashes during driver reconfiguration (enabling XDP,
changing the channel count) - if there is any printk() after
netif_napi_add() but before napi_enable().
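
To illustrate the window (a hypothetical driver reconfiguration path;
the example_* names and the ring structure are made up, not taken from
any real driver), any printk() issued between the two calls can be
forwarded by netconsole, which enters netpoll and walks dev->napi_list:

	/* struct example_ring is assumed to embed a struct napi_struct as
	 * ring->napi, and example_poll() to be the driver's NAPI poll
	 * callback.
	 */
	static int example_reopen(struct net_device *dev,
				  struct example_ring *ring)
	{
		netif_napi_add(dev, &ring->napi, example_poll,
			       NAPI_POLL_WEIGHT);

		/* The napi is already linked into dev->napi_list here.
		 * Before this fix NPSVC was clear, so a netconsole-forwarded
		 * printk() could run example_poll() on rings that do not
		 * exist yet.
		 */
		netdev_info(dev, "reconfiguring channels\n");

		/* ... allocate descriptor rings, program the device ... */

		napi_enable(&ring->napi);
		return 0;
	}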

To ensure memory ordering is correct we need to use RCU accessors.
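
Put differently (condensing the two hunks below into one view): the
writer, serialized by rtnl, publishes the entry only after its state
bits are set, and the netpoll reader traverses the list with the RCU
iterator, so it never sees a napi whose NPSVC bit is not yet visible:

	/* writer side: netif_napi_add(), under rtnl */
	set_bit(NAPI_STATE_SCHED, &napi->state);
	set_bit(NAPI_STATE_NPSVC, &napi->state);
	list_add_rcu(&napi->dev_list, &dev->napi_list);	/* publish last */

	/* reader side: poll_napi(), may run from printk via netconsole */
	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list)
		poll_one_napi(napi);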

Reported-by: Rob Sherwood <rsher@fb.com>
Fixes: 2d8bff12699a ("netpoll: Close race condition between poll_one_napi and napi_disable")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
net/core/dev.c
net/core/netpoll.c

diff --git a/net/core/dev.c b/net/core/dev.c
index 56cd7b83a38293bcc246857c158de381327b2d79..cdc1c3a144e1f1825e520cac76b3dde1e4963292 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6231,12 +6231,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                netdev_err_once(dev, "%s() called with weight %d\n", __func__,
                                weight);
        napi->weight = weight;
-       list_add(&napi->dev_list, &dev->napi_list);
        napi->dev = dev;
 #ifdef CONFIG_NETPOLL
        napi->poll_owner = -1;
 #endif
        set_bit(NAPI_STATE_SCHED, &napi->state);
+       set_bit(NAPI_STATE_NPSVC, &napi->state);
+       list_add_rcu(&napi->dev_list, &dev->napi_list);
        napi_hash_add(napi);
 }
 EXPORT_SYMBOL(netif_napi_add);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 849380a622ef9d7fc2e8970c342245e15997fbda..cb67d36f3adb03cc08c67ffde8a8391fce4fc6af 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -161,7 +161,7 @@ static void poll_napi(struct net_device *dev)
        struct napi_struct *napi;
        int cpu = smp_processor_id();
 
-       list_for_each_entry(napi, &dev->napi_list, dev_list) {
+       list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
                if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
                        poll_one_napi(napi);
                        smp_store_release(&napi->poll_owner, -1);