www.infradead.org Git - users/hch/misc.git/commitdiff
net: move sk->sk_err_soft and sk->sk_sndbuf
author Eric Dumazet <edumazet@google.com>
Fri, 19 Sep 2025 20:48:50 +0000 (20:48 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 23 Sep 2025 00:55:24 +0000 (17:55 -0700)
sk->sk_sndbuf is read-mostly in the tx path, so move it from
the sock_write_tx group to the more appropriate sock_read_tx group.

sk->sk_err_soft had not been placed in a group previously, but
it is used from tcp_ack().

Move it to the sock_write_tx group for better cache locality.

Also change tcp_ack() to clear sk->sk_err_soft only when needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250919204856.2977245-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/sock.h
net/core/sock.c
net/ipv4/tcp_input.c

include/net/sock.h
index 66c2f396b57de5a0048b4ae1e6181c4473be15c5..b4fefeea0213a548a1c3601b95f902a5fa499bc6 100644
@@ -467,7 +467,7 @@ struct sock {
        __cacheline_group_begin(sock_write_tx);
        int                     sk_write_pending;
        atomic_t                sk_omem_alloc;
-       int                     sk_sndbuf;
+       int                     sk_err_soft;
 
        int                     sk_wmem_queued;
        refcount_t              sk_wmem_alloc;
@@ -507,6 +507,7 @@ struct sock {
        unsigned int            sk_gso_max_size;
        gfp_t                   sk_allocation;
        u32                     sk_txhash;
+       int                     sk_sndbuf;
        u8                      sk_pacing_shift;
        bool                    sk_use_task_frag;
        __cacheline_group_end(sock_read_tx);
@@ -523,7 +524,6 @@ struct sock {
        unsigned long           sk_lingertime;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
-       int                     sk_err_soft;
        u32                     sk_ack_backlog;
        u32                     sk_max_ack_backlog;
        unsigned long           sk_ino;
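
For readers unfamiliar with the annotations above, here is a minimal userspace
sketch of the idea behind the sock_write_tx / sock_read_tx groupings. The macro
names and the struct are hypothetical stand-ins, not the kernel's
__cacheline_group_begin()/__cacheline_group_end() definitions: zero-sized marker
members bracket the fields meant to share cachelines, so fields that are only
read in the tx fast path stay apart from fields that are written there.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's group markers: zero-length
 * arrays take no space but give each group a named begin/end offset.
 * (Zero-length arrays are a GNU C extension, as used in the kernel.)
 */
#define cache_group_begin(g)	char __begin_##g[0]
#define cache_group_end(g)	char __end_##g[0]

struct demo_sock {
	cache_group_begin(write_tx);	/* fields written in the tx path   */
	int write_pending;
	int err_soft;			/* dirtied from ACK processing     */
	cache_group_end(write_tx);

	cache_group_begin(read_tx);	/* fields only read in the tx path */
	unsigned int gso_max_size;
	int sndbuf;			/* read-mostly, so it lives here   */
	cache_group_end(read_tx);
};

int main(void)
{
	/* Each group covers a contiguous byte range of the structure,
	 * which is what the sock_struct_check() assertions verify.
	 */
	printf("write_tx spans [%zu, %zu)\n",
	       offsetof(struct demo_sock, __begin_write_tx),
	       offsetof(struct demo_sock, __end_write_tx));
	printf("read_tx  spans [%zu, %zu)\n",
	       offsetof(struct demo_sock, __begin_read_tx),
	       offsetof(struct demo_sock, __end_read_tx));
	return 0;
}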
net/core/sock.c
index ad79efde447675c8a8a3aafe204e2bbb1a5efe7c..dc03d4b5909a2a68aee84eb9a153b2c3970f6b32 100644
@@ -4452,7 +4452,7 @@ static int __init sock_struct_check(void)
 
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
-       CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+       CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_err_soft);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
@@ -4479,6 +4479,7 @@ static int __init sock_struct_check(void)
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+       CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndbuf);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
        CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
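
The CACHELINE_ASSERT_GROUP_MEMBER() updates above keep the build honest: if a
field is declared outside the group it is asserted into, compilation fails. A
rough, hypothetical equivalent using C11 static_assert and the marker macros
from the previous sketch (again, not the kernel's actual definition):

#include <assert.h>
#include <stddef.h>

/* Same hypothetical markers as in the previous sketch. */
#define cache_group_begin(g)	char __begin_##g[0]
#define cache_group_end(g)	char __end_##g[0]

/* Rough equivalent of a group-membership assertion: the member's byte
 * range must fall between the group's begin and end markers, checked
 * entirely at compile time.
 */
#define ASSERT_GROUP_MEMBER(type, group, member)			       \
	static_assert(offsetof(type, member) >=				       \
			      offsetof(type, __begin_##group) &&	       \
		      offsetof(type, member) + sizeof(((type *)0)->member) <= \
			      offsetof(type, __end_##group),		       \
		      #member " is declared outside group " #group)

struct demo_sock {
	cache_group_begin(write_tx);
	int err_soft;			/* moved here by this patch */
	cache_group_end(write_tx);

	cache_group_begin(read_tx);
	int sndbuf;			/* moved here by this patch */
	cache_group_end(read_tx);
};

/* Moving a field later without updating its assertion now breaks the
 * build instead of silently regressing cache locality.
 */
ASSERT_GROUP_MEMBER(struct demo_sock, write_tx, err_soft);
ASSERT_GROUP_MEMBER(struct demo_sock, read_tx, sndbuf);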
net/ipv4/tcp_input.c
index 9fdc6ce25eb1035a88ff2640601cc665187a78b2..f93d48d98d5dacf2ee868cd6b2d65a396443d106 100644
@@ -4085,7 +4085,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* We passed data and got it acked, remove any soft error
         * log. Something worked...
         */
-       WRITE_ONCE(sk->sk_err_soft, 0);
+       if (READ_ONCE(sk->sk_err_soft))
+               WRITE_ONCE(sk->sk_err_soft, 0);
        WRITE_ONCE(icsk->icsk_probes_out, 0);
        tp->rcv_tstamp = tcp_jiffies32;
        if (!prior_packets)
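
The tcp_input.c hunk is a classic check-before-write: clearing sk->sk_err_soft
unconditionally on every ACK would dirty its cacheline even when the field is
already zero. A small hypothetical illustration of the idiom, with C11 relaxed
atomics standing in for READ_ONCE()/WRITE_ONCE():

#include <stdatomic.h>

struct demo_sock {
	_Atomic int err_soft;
	/* ... other hot fields sharing the same cacheline ... */
};

static void demo_ack(struct demo_sock *sk)
{
	/* Unconditional store: dirties the cacheline on every ACK.        */
	/* atomic_store_explicit(&sk->err_soft, 0, memory_order_relaxed);  */

	/* Conditional store: in the common case (no pending soft error)
	 * only a load is issued and the cacheline can stay clean/shared.
	 */
	if (atomic_load_explicit(&sk->err_soft, memory_order_relaxed))
		atomic_store_explicit(&sk->err_soft, 0, memory_order_relaxed);
}

int main(void)
{
	struct demo_sock sk = { .err_soft = 0 };

	demo_ack(&sk);		/* field already clear: read only     */
	atomic_store_explicit(&sk.err_soft, 1, memory_order_relaxed);
	demo_ack(&sk);		/* soft error pending: store to clear */
	return 0;
}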