 /* In most cases we will be able to lock the mptcp socket.  If it's already
  * owned, we need to defer to the work queue to avoid ABBA deadlock.
  */
-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
 {
        struct sock *sk = (struct sock *)msk;
        unsigned int moved = 0;
 
        if (inet_sk_state_load(sk) == TCP_CLOSE)
-               return;
-
-       mptcp_data_lock(sk);
+               return false;
 
        __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
        __mptcp_ofo_queue(msk);
 
        /* If the moves have caught up with the DATA_FIN sequence number
         * it's time to ack the DATA_FIN and change socket state, but
         * this is not a good place to change state. Let the workqueue
         * do it.
         */
        if (mptcp_pending_data_fin(sk, NULL))
                mptcp_schedule_work(sk);
-       mptcp_data_unlock(sk);
+       return moved > 0;
 }
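
    /* Example (not part of the patch): a minimal userspace sketch of the
     * "lock or defer to a workqueue" pattern the comment above refers to,
     * built on pthreads with invented names (demo_sock, demo_process,
     * demo_schedule_work).  Blocking on the lock instead could deadlock
     * against a path that takes the same two locks in the opposite order,
     * which is the ABBA scenario being avoided.
     */
    #include <pthread.h>

    struct demo_sock {
            pthread_mutex_t owner;          /* stands in for socket ownership */
    };

    static void demo_schedule_work(struct demo_sock *s)
    {
            (void)s;                        /* queue the socket for the worker */
    }

    static void demo_process(struct demo_sock *s)
    {
            if (pthread_mutex_trylock(&s->owner) == 0) {
                    /* fast path: we own the socket, handle the data now */
                    pthread_mutex_unlock(&s->owner);
            } else {
                    /* already owned: defer rather than block */
                    demo_schedule_work(s);
            }
    }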
 
 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_sock *msk = mptcp_sk(sk);
        int sk_rbuf, ssk_rbuf;
-       bool wake;
 
        /* The peer can send data while we are shutting down this
         * subflow at msk destruction time, but we must avoid enqueuing
         * more data to the msk receive queue
         */
        if (unlikely(subflow->disposable))
                return;
 
-       /* move_skbs_to_msk below can legitly clear the data_avail flag,
-        * but we will need later to properly woke the reader, cache its
-        * value
-        */
-       wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
-       if (wake)
-               set_bit(MPTCP_DATA_READY, &msk->flags);
-
        ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
        sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
        if (unlikely(ssk_rbuf > sk_rbuf))
                sk_rbuf = ssk_rbuf;
 
-       /* over limit? can't append more skbs to msk */
+       /* over limit? can't append more skbs to msk; also, no need to wake up */
        if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
-               goto wake;
-
-       move_skbs_to_msk(msk, ssk);
+               return;
 
-wake:
-       if (wake)
+       /* Wake up the reader only for in-sequence data */
+       mptcp_data_lock(sk);
+       if (move_skbs_to_msk(msk, ssk)) {
+               set_bit(MPTCP_DATA_READY, &msk->flags);
                sk->sk_data_ready(sk);
+       }
+       mptcp_data_unlock(sk);
 }
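
    /* Example (not part of the patch): a userspace analogue of the new
     * mptcp_data_ready() shape, with invented names (rxq, move_in_sequence,
     * data_ready).  The flag/wakeup happens under the same lock as the
     * move, and only when in-sequence data landed, so out-of-order
     * arrivals no longer wake a reader that could not make progress.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct rxq {
            pthread_mutex_t lock;           /* stands in for the msk data lock */
            pthread_cond_t ready;           /* stands in for sk_data_ready() */
            uint64_t next_seq;              /* next in-order byte expected */
    };

    /* caller holds q->lock; true only if in-sequence data was moved */
    static bool move_in_sequence(struct rxq *q, uint64_t seq, unsigned int len)
    {
            if (seq != q->next_seq)
                    return false;           /* out of order: park it, no wakeup */
            q->next_seq += len;
            return len > 0;
    }

    static void data_ready(struct rxq *q, uint64_t seq, unsigned int len)
    {
            pthread_mutex_lock(&q->lock);
            if (move_in_sequence(q, seq, len))
                    pthread_cond_signal(&q->ready); /* wake on progress only */
            pthread_mutex_unlock(&q->lock);
    }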
 
 static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
        sock_owned_by_me(sk);
 
        mptcp_for_each_subflow(msk, subflow) {
-               if (subflow->data_avail)
+               if (READ_ONCE(subflow->data_avail))
                        return mptcp_subflow_tcp_sock(subflow);
        }
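
    /* Example (not part of the patch): the switch to READ_ONCE()/WRITE_ONCE()
     * matters because this lookup runs under the msk socket lock while
     * data_avail is updated under the subflow (ssk) lock, i.e. the two
     * sides are not serialized against each other.  In portable C the
     * closest analogue is a relaxed C11 atomic, which likewise prevents
     * torn, fused or cached accesses; the names below are invented.
     */
    #include <stdatomic.h>

    static _Atomic unsigned int data_avail; /* subflow->data_avail stand-in */

    static void publish_data_avail(unsigned int state) /* WRITE_ONCE() side */
    {
            atomic_store_explicit(&data_avail, state, memory_order_relaxed);
    }

    static unsigned int peek_data_avail(void)          /* READ_ONCE() side */
    {
            return atomic_load_explicit(&data_avail, memory_order_relaxed);
    }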
 
 
        struct sk_buff *skb;
 
        if (!skb_peek(&ssk->sk_receive_queue))
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
        if (subflow->data_avail)
                return true;
 
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
-               if (ack_seq == old_ack) {
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       break;
-               } else if (after64(ack_seq, old_ack)) {
-                       subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
-                       break;
+               if (unlikely(before64(ack_seq, old_ack))) {
+                       mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+                       continue;
                }
 
-               /* only accept in-sequence mapping. Old values are spurious
-                * retransmission
-                */
-               mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+               break;
        }
        return true;
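
    /* Example (not part of the patch): before64() is the wrap-around-safe
     * "less than" for 64-bit data sequence numbers, mirroring the helper
     * in net/mptcp/protocol.h.  When the mapped DSN is behind the msk-level
     * ack_seq, the first old_ack - ack_seq bytes are a spurious
     * retransmission and get discarded, as in the loop above.
     */
    #include <stdint.h>
    #include <stdio.h>

    static inline int before64(uint64_t seq1, uint64_t seq2)
    {
            return (int64_t)(seq1 - seq2) < 0;  /* wrap-safe seq1 < seq2 */
    }

    int main(void)
    {
            uint64_t old_ack = 1000;        /* msk->ack_seq stand-in */
            uint64_t ack_seq = 940;         /* mapped DSN of the skb */

            if (before64(ack_seq, old_ack))
                    printf("discard %llu already-received bytes\n",
                           (unsigned long long)(old_ack - ack_seq));
            return 0;
    }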
 
                subflow->reset_transient = 0;
                subflow->reset_reason = MPTCP_RST_EMPTCP;
                tcp_send_active_reset(ssk, GFP_ATOMIC);
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
                return false;
        }
 
        subflow->map_seq = READ_ONCE(msk->ack_seq);
        subflow->map_data_len = skb->len;
        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
-       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+       WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
        return true;
 }
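
    /* Example (not part of the patch): on a fallback subflow there is no
     * DSS option to parse, so the mapping above is synthesized: the
     * data-level sequence continues at msk->ack_seq and the relative
     * subflow sequence is how far copied_seq has advanced past the
     * subflow's initial sequence offset.  The values below are made up.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t msk_ack_seq = 4242;    /* msk->ack_seq stand-in */
            uint32_t copied_seq  = 1000212; /* tcp_sk(ssk)->copied_seq */
            uint32_t ssn_offset  = 1000000; /* subflow->ssn_offset */
            uint32_t skb_len     = 100;     /* skb->len stand-in */

            printf("map_seq=%llu map_subflow_seq=%u map_data_len=%u\n",
                   (unsigned long long)msk_ack_seq,
                   copied_seq - ssn_offset, skb_len);
            return 0;
    }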
 
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
 
                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,