www.infradead.org Git - users/hch/misc.git/commitdiff
udp: update sk_rmem_alloc before busylock acquisition
author: Eric Dumazet <edumazet@google.com>
Tue, 16 Sep 2025 16:09:47 +0000 (16:09 +0000)
committer: Paolo Abeni <pabeni@redhat.com>
Thu, 18 Sep 2025 08:17:10 +0000 (10:17 +0200)
Avoid piling too many producers on the busylock
by updating sk_rmem_alloc before busylock acquisition.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20250916160951.541279-7-edumazet@google.com
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/ipv4/udp.c

index edd846fee90ff7850356a5cb3400ce96856e5429..658ae87827991a78c25c2172d52e772c94ea217f 100644 (file)
@@ -1753,13 +1753,16 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
        if (rmem > (rcvbuf >> 1)) {
                skb_condense(skb);
                size = skb->truesize;
+               rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
+               if (rmem > rcvbuf)
+                       goto uncharge_drop;
                busy = busylock_acquire(sk);
+       } else {
+               atomic_add(size, &sk->sk_rmem_alloc);
        }
 
        udp_set_dev_scratch(skb);
 
-       atomic_add(size, &sk->sk_rmem_alloc);
-
        spin_lock(&list->lock);
        err = udp_rmem_schedule(sk, size);
        if (err) {