www.infradead.org Git - users/hch/misc.git/commitdiff
udp: add udp_drops_inc() helper
author: Eric Dumazet <edumazet@google.com>
Tue, 16 Sep 2025 16:09:49 +0000 (16:09 +0000)
committer: Paolo Abeni <pabeni@redhat.com>
Thu, 18 Sep 2025 08:17:10 +0000 (10:17 +0200)
Generic sk_drops_inc() reads sk->sk_drop_counters.
We know the precise location for UDP sockets.

Move sk_drop_counters out of sock_read_rxtx
so that sock_write_rxtx starts at a cache line boundary.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250916160951.541279-9-edumazet@google.com
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/net/sock.h
include/net/udp.h
net/core/sock.c
net/ipv4/udp.c
net/ipv6/udp.c

index 867dc44140d4c1b56ecfab1220c81133fe0394a0..82bcdb7d7e6779de41ace0dde3a8b54e6adb0c14 100644 (file)
@@ -451,7 +451,6 @@ struct sock {
 #ifdef CONFIG_XFRM
        struct xfrm_policy __rcu *sk_policy[2];
 #endif
-       struct numa_drop_counters *sk_drop_counters;
        __cacheline_group_end(sock_read_rxtx);
 
        __cacheline_group_begin(sock_write_rxtx);
@@ -568,6 +567,7 @@ struct sock {
 #ifdef CONFIG_BPF_SYSCALL
        struct bpf_local_storage __rcu  *sk_bpf_storage;
 #endif
+       struct numa_drop_counters *sk_drop_counters;
        struct rcu_head         sk_rcu;
        netns_tracker           ns_tracker;
        struct xarray           sk_user_frags;
index 93b159f30e884ce7d30e2d2240b846441c5e135b..a08822e294b038c0d00d4a5f5cac62286a207926 100644 (file)
@@ -295,6 +295,11 @@ static inline void udp_lib_init_sock(struct sock *sk)
        set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
 }
 
+static inline void udp_drops_inc(struct sock *sk)
+{
+       numa_drop_add(&udp_sk(sk)->drop_counters, 1);
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline int udp_lib_hash(struct sock *sk)
 {
index 1f8ef4d8bcd9e8084eda82cad44c010071ceb171..21742da19e45bbe53e84b8a87d5a23bc2d2275f8 100644 (file)
@@ -4444,7 +4444,6 @@ static int __init sock_struct_check(void)
 #ifdef CONFIG_MEMCG
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
 #endif
-       CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_drop_counters);
 
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
index 658ae87827991a78c25c2172d52e772c94ea217f..25143f932447df2a84dd113ca33e1ccf15b3503c 100644 (file)
@@ -1790,7 +1790,7 @@ uncharge_drop:
        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 
 drop:
-       sk_drops_inc(sk);
+       udp_drops_inc(sk);
        busylock_release(busy);
        return err;
 }
@@ -1855,7 +1855,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
                                        IS_UDPLITE(sk));
                        __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
                                        IS_UDPLITE(sk));
-                       sk_drops_inc(sk);
+                       udp_drops_inc(sk);
                        __skb_unlink(skb, rcvq);
                        *total += skb->truesize;
                        kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
@@ -2011,7 +2011,7 @@ try_again:
 
                __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
                __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
-               sk_drops_inc(sk);
+               udp_drops_inc(sk);
                kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
                goto try_again;
        }
@@ -2081,7 +2081,7 @@ try_again:
 
        if (unlikely(err)) {
                if (!peeking) {
-                       sk_drops_inc(sk);
+                       udp_drops_inc(sk);
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_INERRORS, is_udplite);
                }
@@ -2452,7 +2452,7 @@ csum_error:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-       sk_drops_inc(sk);
+       udp_drops_inc(sk);
        sk_skb_reason_drop(sk, skb, drop_reason);
        return -1;
 }
@@ -2537,7 +2537,7 @@ start_lookup:
                nskb = skb_clone(skb, GFP_ATOMIC);
 
                if (unlikely(!nskb)) {
-                       sk_drops_inc(sk);
+                       udp_drops_inc(sk);
                        __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                                        IS_UDPLITE(sk));
                        __UDP_INC_STATS(net, UDP_MIB_INERRORS,
index e87d0ef861f88af3ff7bf9dd5045c4d4601036e3..9f4d340d1e3a63d38f80138ef9f6aac4a33afa05 100644 (file)
@@ -524,7 +524,7 @@ try_again:
        }
        if (unlikely(err)) {
                if (!peeking) {
-                       sk_drops_inc(sk);
+                       udp_drops_inc(sk);
                        SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
                }
                kfree_skb(skb);
@@ -908,7 +908,7 @@ csum_error:
        __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-       sk_drops_inc(sk);
+       udp_drops_inc(sk);
        sk_skb_reason_drop(sk, skb, drop_reason);
        return -1;
 }
@@ -1013,7 +1013,7 @@ start_lookup:
                }
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!nskb)) {
-                       sk_drops_inc(sk);
+                       udp_drops_inc(sk);
                        __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                                         IS_UDPLITE(sk));
                        __UDP6_INC_STATS(net, UDP_MIB_INERRORS,