mptcp: don't always assume copied data in mptcp_cleanup_rbuf()
author    Paolo Abeni <pabeni@redhat.com>
          Mon, 30 Dec 2024 18:12:31 +0000 (19:12 +0100)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 3 Jan 2025 02:44:03 +0000 (18:44 -0800)
In some corner cases the MPTCP protocol can end up invoking
mptcp_cleanup_rbuf() when no data has been copied, but that helper
assumes the opposite condition.

Explicitly drop that assumption and perform the costly call only
when strictly needed: just before releasing the msk socket lock.
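
The resulting shape of the receive path, as a rough sketch only
(the queue-draining helper name below is hypothetical, standing in
for the real mptcp_recvmsg() plumbing):

	int copied = 0;

	while (len > 0) {
		/* may copy nothing on a corner-case wakeup */
		copied += copy_from_receive_queue(msk, msg); /* hypothetical */

		if (!timeo)
			break;

		/* advertise the window change before sleeping */
		mptcp_cleanup_rbuf(msk, copied);
		sk_wait_data(sk, &timeo, NULL);
	}

	/* final, possibly costly, update before releasing the msk lock */
	mptcp_cleanup_rbuf(msk, copied);

With copied == 0 both call sites become cheap no-ops, matching the
new guards inside mptcp_cleanup_rbuf().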

Fixes: fd8976790a6c ("mptcp: be careful on MPTCP-level ack.")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20241230-net-mptcp-rbuf-fixes-v1-2-8608af434ceb@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/mptcp/protocol.c

index 27afdb7e2071b16dbc4dfa1199b6e78c784f7a7c..5307fff9d995309591ed742801350078db519f79 100644
@@ -528,13 +528,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
                mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
 }
 
-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
 {
        bool slow;
 
        slow = lock_sock_fast(ssk);
        if (tcp_can_send_ack(ssk))
-               tcp_cleanup_rbuf(ssk, 1);
+               tcp_cleanup_rbuf(ssk, copied);
        unlock_sock_fast(ssk, slow);
 }
 
@@ -551,7 +551,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
                              (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
 }
 
-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
 {
        int old_space = READ_ONCE(msk->old_wspace);
        struct mptcp_subflow_context *subflow;
@@ -559,14 +559,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
        int space =  __mptcp_space(sk);
        bool cleanup, rx_empty;
 
-       cleanup = (space > 0) && (space >= (old_space << 1));
-       rx_empty = !__mptcp_rmem(sk);
+       cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
+       rx_empty = !__mptcp_rmem(sk) && copied;
 
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
                if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
-                       mptcp_subflow_cleanup_rbuf(ssk);
+                       mptcp_subflow_cleanup_rbuf(ssk, copied);
        }
 }
 
@@ -2220,9 +2220,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
                copied += bytes_read;
 
-               /* be sure to advertise window change */
-               mptcp_cleanup_rbuf(msk);
-
                if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
                        continue;
 
@@ -2271,6 +2268,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                }
 
                pr_debug("block timeout %ld\n", timeo);
+               mptcp_cleanup_rbuf(msk, copied);
                err = sk_wait_data(sk, &timeo, NULL);
                if (err < 0) {
                        err = copied ? : err;
@@ -2278,6 +2276,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                }
        }
 
+       mptcp_cleanup_rbuf(msk, copied);
+
 out_err:
        if (cmsg_flags && copied >= 0) {
                if (cmsg_flags & MPTCP_CMSG_TS)