tcp: move tcp->rcv_tstamp to tcp_sock_write_txrx group
author     Eric Dumazet <edumazet@google.com>
           Fri, 19 Sep 2025 20:48:52 +0000 (20:48 +0000)
committer  Jakub Kicinski <kuba@kernel.org>
           Tue, 23 Sep 2025 00:55:24 +0000 (17:55 -0700)
tcp_ack() writes this field, so it belongs in tcp_sock_write_txrx.
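
For context, the cacheline grouping works via zero-size marker members that bracket each group, with offsetof()-based asserts in tcp_struct_check() catching any field that sits outside its declared group. Below is a minimal standalone sketch of that mechanism (GROUP_BEGIN, GROUP_END, ASSERT_GROUP_MEMBER and struct toy_sock are illustrative stand-ins, not the kernel's actual macro definitions); it shows why the assert for rcv_tstamp must move from the tcp_sock_read_rx list to the tcp_sock_write_txrx list together with the field itself.

/*
 * Sketch only: mimics the idea behind __cacheline_group_begin/_end and
 * CACHELINE_ASSERT_GROUP_MEMBER with plain C11 static_assert().
 * Zero-length marker arrays are a GNU extension, as in the kernel.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Zero-size members mark where a named group starts and ends. */
#define GROUP_BEGIN(grp)	uint8_t __group_begin__##grp[0]
#define GROUP_END(grp)		uint8_t __group_end__##grp[0]

/* Compile-time check that 'member' lies between the markers of 'grp'. */
#define ASSERT_GROUP_MEMBER(type, grp, member)				\
	static_assert(offsetof(type, member) >=				\
			offsetof(type, __group_begin__##grp) &&		\
		      offsetof(type, member) <				\
			offsetof(type, __group_end__##grp),		\
		      #member " is outside group " #grp)

/* Toy stand-in for struct tcp_sock with two of its groups. */
struct toy_sock {
	GROUP_BEGIN(read_rx);
	uint32_t copied_seq;
	uint32_t snd_wl1;
	GROUP_END(read_rx);

	GROUP_BEGIN(write_txrx);
	uint32_t rcv_wnd;
	uint32_t rcv_tstamp;	/* written by the ACK path, like tcp_ack() */
	GROUP_END(write_txrx);
};

/* Mirrors the CACHELINE_ASSERT_GROUP_MEMBER() calls in tcp_struct_check():
 * if rcv_tstamp were still declared in the read_rx group, the second assert
 * would fail at compile time.
 */
ASSERT_GROUP_MEMBER(struct toy_sock, read_rx, copied_seq);
ASSERT_GROUP_MEMBER(struct toy_sock, write_txrx, rcv_tstamp);

int main(void)
{
	printf("rcv_tstamp offset: %zu (write_txrx group begins at %zu)\n",
	       offsetof(struct toy_sock, rcv_tstamp),
	       offsetof(struct toy_sock, __group_begin__write_txrx));
	return 0;
}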

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250919204856.2977245-5-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Documentation/networking/net_cachelines/tcp_sock.rst
include/linux/tcp.h
net/ipv4/tcp.c

diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index d4dc018009451261c81a46dac2d6322005901c99..429df29fba8bc08bce519870e403815780a2182b 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -26,7 +26,7 @@ u64                           bytes_acked                                 read_w
 u32                           dsack_dups
 u32                           snd_una                 read_mostly         read_write          tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx)
 u32                           snd_sml                 read_write                              tcp_minshall_check,tcp_minshall_update
-u32                           rcv_tstamp                                  read_mostly         tcp_ack
+u32                           rcv_tstamp              read_write          read_write          tcp_ack
 void *                        tcp_clean_acked                             read_mostly         tcp_ack
 u32                           lsndtime                read_write                              tcp_slow_start_after_idle_check,tcp_event_data_sent
 u32                           last_oow_ack_time
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3ca5ed02de6d48e64c26744f117d72675f84a3f3..1e6c2ded22c985134bd48b7bf5fd464e01e2fd51 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -238,7 +238,6 @@ struct tcp_sock {
        /* RX read-mostly hotpath cache lines */
        __cacheline_group_begin(tcp_sock_read_rx);
        u32     copied_seq;     /* Head of yet unread data */
-       u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
        u32     snd_wl1;        /* Sequence for window update           */
        u32     tlp_high_seq;   /* snd_nxt at the time of TLP */
        u32     rttvar_us;      /* smoothed mdev_max                    */
@@ -246,13 +245,13 @@ struct tcp_sock {
        u16     advmss;         /* Advertised MSS                       */
        u16     urg_data;       /* Saved octet of OOB data and control flags */
        u32     lost;           /* Total data packets lost incl. rexmits */
+       u32     snd_ssthresh;   /* Slow start size threshold            */
        struct  minmax rtt_min;
        /* OOO segments go in this rbtree. Socket lock must be held. */
        struct rb_root  out_of_order_queue;
 #if defined(CONFIG_TLS_DEVICE)
        void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
 #endif
-       u32     snd_ssthresh;   /* Slow start size threshold            */
        u8      recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
        __cacheline_group_end(tcp_sock_read_rx);
 
@@ -319,6 +318,7 @@ struct tcp_sock {
                                        */
        u32     app_limited;    /* limited until "delivered" reaches this val */
        u32     rcv_wnd;        /* Current receiver window              */
+       u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
 /*
  *      Options received (usually on last packet, some only on SYN packets).
  */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5932dba3bd717b59e730630d7390b65f329b03c2..721287ca3328eb543e1d8c999b08ca617b77b8a7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5114,7 +5114,6 @@ static void __init tcp_struct_check(void)
 
        /* RX read-mostly hotpath cache lines */
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
-       CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us);
@@ -5164,6 +5163,7 @@ static void __init tcp_struct_check(void)
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, received_ecn_bytes);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
+       CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_tstamp);
        CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
 
        /* RX read-write hotpath cache lines */