inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
 }
 
+/* Return true if the msk owns at least one subflow other than ssk. */
+static inline bool mptcp_has_another_subflow(struct sock *ssk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk), *tmp;
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+       mptcp_for_each_subflow(msk, tmp) {
+               if (tmp != subflow)
+                       return true;
+       }
+
+       return false;
+}
+
 void __init mptcp_proto_init(void);
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 int __init mptcp_proto_v6_init(void);
 
        csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
        if (unlikely(csum_fold(csum))) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
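+               /* Flag the subflow so an MP_FAIL option is sent to the peer
+                * and the fallback path below resets this subflow.
+                */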
+               subflow->send_mp_fail = 1;
                return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
        }
 
 
 fallback:
        /* RFC 8684 section 3.7. */
+       if (subflow->send_mp_fail) {
+               if (mptcp_has_another_subflow(ssk)) {
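+                       /* Data queued on this subflow can be retransmitted by
+                        * the peer on the remaining subflows, so drop it.
+                        */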
+                       while ((skb = skb_peek(&ssk->sk_receive_queue)))
+                               sk_eat_skb(ssk, skb);
+               }
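+               /* Mark the subflow errored and reset it, reporting middlebox
+                * interference (MPTCP_RST_EMIDDLEBOX) to the peer.
+                */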
+               ssk->sk_err = EBADMSG;
+               tcp_set_state(ssk, TCP_CLOSE);
+               subflow->reset_transient = 0;
+               subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
+               tcp_send_active_reset(ssk, GFP_ATOMIC);
+               WRITE_ONCE(subflow->data_avail, 0);
+               return true;
+       }
+
        if (subflow->mp_join || subflow->fully_established) {
                /* fatal protocol error, close the socket.
                 * subflow_error_report() will introduce the appropriate barriers