tcp: move tcp_clean_acked to tcp_sock_read_tx group
author    Eric Dumazet <edumazet@google.com>
          Fri, 19 Sep 2025 20:48:54 +0000 (20:48 +0000)
committer Jakub Kicinski <kuba@kernel.org>
          Tue, 23 Sep 2025 00:55:25 +0000 (17:55 -0700)
tp->tcp_clean_acked is fetched in the tx path when snd_una is updated.

This field thus belongs in the tcp_sock_read_tx group; move it there
from the tcp_sock_read_rx group.
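
For context, the reader of this pointer is the ACK processing path. A
minimal sketch of the tcp_ack() call site in net/ipv4/tcp_input.c
(paraphrased from memory, surrounding details elided, not the exact
upstream code) shows that the hook fires exactly when snd_una advances:

	if (after(ack, prior_snd_una)) {
		flag |= FLAG_SND_UNA_ADVANCED;
		icsk->icsk_retransmits = 0;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
		/* TLS offload: let the driver free acknowledged records. */
		if (static_branch_unlikely(&clean_acked_data_enabled.key))
			if (tp->tcp_clean_acked)
				tp->tcp_clean_acked(sk, ack);
#endif
	}

Since this read happens in the same path that updates snd_una, grouping
the pointer with the other tx read-mostly fields avoids touching an
extra rx-group cache line here.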

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250919204856.2977245-7-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Documentation/networking/net_cachelines/tcp_sock.rst
include/linux/tcp.h
net/ipv4/tcp.c

diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index c2138619b995882663a06c2a388d5333d6fe54f0..26f32dbcf6ec9004e3be1bfefba8d303a60b1a99 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -27,7 +27,7 @@ u32                           dsack_dups
 u32                           snd_una                 read_mostly         read_write          tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx)
 u32                           snd_sml                 read_write                              tcp_minshall_check,tcp_minshall_update
 u32                           rcv_tstamp              read_write          read_write          tcp_ack
-void *                        tcp_clean_acked                             read_mostly         tcp_ack
+void *                        tcp_clean_acked         read_mostly                             tcp_ack
 u32                           lsndtime                read_write                              tcp_slow_start_after_idle_check,tcp_event_data_sent
 u32                           last_oow_ack_time
 u32                           compressed_ack_rcv_nxt
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c1d7fce251d74be8c5912526637f44c97905e738..3f282130c863d07cddd931b85f43afaf44bc7323 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -215,6 +215,9 @@ struct tcp_sock {
        u16     gso_segs;       /* Max number of segs per GSO packet    */
        /* from STCP, retrans queue hinting */
        struct sk_buff *retransmit_skb_hint;
+#if defined(CONFIG_TLS_DEVICE)
+       void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
+#endif
        __cacheline_group_end(tcp_sock_read_tx);
 
        /* TXRX read-mostly hotpath cache lines */
@@ -250,9 +253,6 @@ struct tcp_sock {
        struct  minmax rtt_min;
        /* OOO segments go in this rbtree. Socket lock must be held. */
        struct rb_root  out_of_order_queue;
-#if defined(CONFIG_TLS_DEVICE)
-       void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
-#endif
        __cacheline_group_end(tcp_sock_read_rx);
 
        /* TX read-write hotpath cache lines */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 721287ca3328eb543e1d8c999b08ca617b77b8a7..7949d16506a46eb561479b77bebce4fe88971c12 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5101,6 +5101,9 @@ static void __init tcp_struct_check(void)
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+       CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, tcp_clean_acked);
+#endif
 
        /* TXRX read-mostly hotpath cache lines */
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
@@ -5124,9 +5127,6 @@ static void __init tcp_struct_check(void)
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-       CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked);
-#endif
 
        /* TX read-write hotpath cache lines */
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);