www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
net: better skb->sender_cpu and skb->napi_id cohabitation
author Eric Dumazet <edumazet@google.com>
Wed, 11 Oct 2017 09:31:13 +0000 (17:31 +0800)
committer Ethan Zhao <ethan.zhao@oracle.com>
Fri, 13 Oct 2017 02:06:29 +0000 (22:06 -0400)
Orabug: 26953388
Orabug: 26591689

skb->sender_cpu and skb->napi_id share a common storage,
and we had various bugs about this.

We had to call skb_sender_cpu_clear() in some places to
not leave a prior skb->napi_id and fool netdev_pick_tx()

As suggested by Alexei, we could split the space so that
these errors can not happen.

0 value being reserved as the common (not initialized) value,
let's reserve [1 .. NR_CPUS] range for valid sender_cpu,
and [NR_CPUS+1 .. ~0U] for valid napi_id.

This will allow proper busy polling support over tunnels.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ethan Zhao <ethan.zhao@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
include/linux/skbuff.h
net/core/dev.c
net/core/flow_dissector.c

index be7102b9cc165863210bab0831010cab13b855e5..4921d9aee5858ae1249d62ecae5826ba451edef1 100644 (file)
@@ -956,9 +956,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 
 static inline void skb_sender_cpu_clear(struct sk_buff *skb)
 {
-#ifdef CONFIG_XPS
-       skb->sender_cpu = 0;
-#endif
 }
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
index 18bfb1659fe8bccf69f7c8bb73dc740616ee865f..1346af1b16ba4f24253ce523f853c36899051802 100644 (file)
@@ -180,7 +180,7 @@ EXPORT_SYMBOL(dev_base_lock);
 /* protects napi_hash addition/deletion and napi_gen_id */
 static DEFINE_SPINLOCK(napi_hash_lock);
 
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
 static DEFINE_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
@@ -4519,25 +4519,22 @@ EXPORT_SYMBOL_GPL(napi_by_id);
 
 void napi_hash_add(struct napi_struct *napi)
 {
-       if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+       if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+               return;
 
-               spin_lock(&napi_hash_lock);
+       spin_lock(&napi_hash_lock);
 
-               /* 0 is not a valid id, we also skip an id that is taken
-                * we expect both events to be extremely rare
-                */
-               napi->napi_id = 0;
-               while (!napi->napi_id) {
-                       napi->napi_id = ++napi_gen_id;
-                       if (napi_by_id(napi->napi_id))
-                               napi->napi_id = 0;
-               }
+       /* 0..NR_CPUS range is reserved for sender_cpu use */
+       do {
+               if (unlikely(++napi_gen_id < NR_CPUS + 1))
+                       napi_gen_id = NR_CPUS + 1;
+       } while (napi_by_id(napi_gen_id));
+       napi->napi_id = napi_gen_id;
 
-               hlist_add_head_rcu(&napi->napi_hash_node,
-                       &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+       hlist_add_head_rcu(&napi->napi_hash_node,
+               &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 
-               spin_unlock(&napi_hash_lock);
-       }
+       spin_unlock(&napi_hash_lock);
 }
 EXPORT_SYMBOL_GPL(napi_hash_add);
 
index 3556791fdc6ebe4ea28cf8602f656e28a0e1f09b..a262b3780fc4fd5a9e17bc918c408e0718731ee2 100644 (file)
@@ -476,7 +476,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
        int queue_index = 0;
 
 #ifdef CONFIG_XPS
-       if (skb->sender_cpu == 0)
+       u32 sender_cpu = skb->sender_cpu - 1;
+
+       if (sender_cpu >= (u32)NR_CPUS)
                skb->sender_cpu = raw_smp_processor_id() + 1;
 #endif