tcp: wrap mptcp and decrypted checks into tcp_skb_can_collapse_rx()
author Jakub Kicinski <kuba@kernel.org>
Thu, 30 May 2024 23:36:14 +0000 (16:36 -0700)
committer Paolo Abeni <pabeni@redhat.com>
Tue, 4 Jun 2024 11:23:30 +0000 (13:23 +0200)
tcp_skb_can_collapse() checks for conditions which don't make sense
on input. Because of this we ended up sprinkling a few pairs of
mptcp_skb_can_collapse() and skb_cmp_decrypted() calls on the input
path. Group them in a new helper. This should make it less likely
that someone adding new code will check mptcp but forget decrypted,
or vice versa.
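
For illustration, this is the rx-path pattern the new helper folds
together (cf. the tcp_try_coalesce() hunk below); its definition, in
the include/net/tcp.h hunk, simply combines the two existing checks:

	/* Before: every rx-path caller had to remember both checks. */
	if (!mptcp_skb_can_collapse(to, from))
		return false;
	if (skb_cmp_decrypted(from, to))
		return false;

	/* After: one helper covers both the MPTCP mapping and the
	 * TLS decrypted state, so neither check can be forgotten.
	 */
	if (!tcp_skb_can_collapse_rx(to, from))
		return false;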

This implicitly adds a decrypted check early in tcp_collapse().
AFAIU this will very slightly increase our ability to collapse
packets under memory pressure, so it is not a real bug.
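
Concretely, the decrypted-state comparison now participates in the
early scan of tcp_collapse() as well (first net/ipv4/tcp_input.c hunk
below); previously only the inner copy loop called skb_cmp_decrypted():

	/* tcp_collapse() eligibility scan: the helper makes the
	 * decrypted state part of this early check too. */
	if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) &&
	    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
		end_of_skbs = false;
		break;
	}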

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c

index 32815a40dea16637d2cc49d46863b532a44fbab3..32741856da0152af19074662b8183a3c47cc63f1 100644 (file)
@@ -1071,6 +1071,13 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
                      skb_pure_zcopy_same(to, from));
 }
 
+static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
+                                          const struct sk_buff *from)
+{
+       return likely(mptcp_skb_can_collapse(to, from) &&
+                     !skb_cmp_decrypted(to, from));
+}
+
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
index 5aadf64e554d8009b2739613c279bbf82a05bbdd..212b6fd0caf76b0fb96f8dc06933f7680add3d26 100644 (file)
@@ -4813,10 +4813,7 @@ static bool tcp_try_coalesce(struct sock *sk,
        if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
                return false;
 
-       if (!mptcp_skb_can_collapse(to, from))
-               return false;
-
-       if (skb_cmp_decrypted(from, to))
+       if (!tcp_skb_can_collapse_rx(to, from))
                return false;
 
        if (!skb_try_coalesce(to, from, fragstolen, &delta))
@@ -5372,7 +5369,7 @@ restart:
                        break;
                }
 
-               if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
+               if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) &&
                    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
                        end_of_skbs = false;
                        break;
@@ -5423,11 +5420,9 @@ restart:
                                skb = tcp_collapse_one(sk, skb, list, root);
                                if (!skb ||
                                    skb == tail ||
-                                   !mptcp_skb_can_collapse(nskb, skb) ||
+                                   !tcp_skb_can_collapse_rx(nskb, skb) ||
                                    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
                                        goto end;
-                               if (skb_cmp_decrypted(skb, nskb))
-                                       goto end;
                        }
                }
        }
index 59d5b064f23346080fb5057196406bbf9932a315..04044605cadf1ffe86b4fa9bc75c1ff812f7f053 100644 (file)
@@ -2044,8 +2044,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
              TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
            ((TCP_SKB_CB(tail)->tcp_flags ^
              TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
-           !mptcp_skb_can_collapse(tail, skb) ||
-           skb_cmp_decrypted(tail, skb) ||
+           !tcp_skb_can_collapse_rx(tail, skb) ||
            thtail->doff != th->doff ||
            memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
                goto no_coalesce;