#include "protocol.h"
 
+void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+                                             struct request_sock *req)
+{
+       struct sock *ssk = subflow->tcp_sock;
+       struct sock *sk = subflow->conn;
+       struct sk_buff *skb;
+       struct tcp_sock *tp;
+
+       tp = tcp_sk(ssk);
+
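+       /* mark the subflow as created via an MPTCP fastopen (MPTFO) request */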
+       subflow->is_mptfo = 1;
+
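+       /* the TCP layer queued the TFO data on the subflow receive queue */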
+       skb = skb_peek(&ssk->sk_receive_queue);
+       if (WARN_ON_ONCE(!skb))
+               return;
+
+       /* dequeue the skb from the subflow (ssk) receive queue */
+       __skb_unlink(skb, &ssk->sk_receive_queue);
+       skb_ext_reset(skb);
+       skb_orphan(skb);
+
+       /* The fastopen data is carried over to the msk, but it doesn't belong
+        * to the mptcp sequence space, so it needs to be offset in the subflow
+        * sequence; see mptcp_subflow_get_map_offset()
+        */
+       tp->copied_seq += skb->len;
+       subflow->ssn_offset += skb->len;
+
+       /* initialize a dummy sequence number; we will update it at MPC
+        * completion, if needed
+        */
+       MPTCP_SKB_CB(skb)->map_seq = -skb->len;
+       MPTCP_SKB_CB(skb)->end_seq = 0;
+       MPTCP_SKB_CB(skb)->offset = 0;
+       MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+
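+       /* hand the skb over to the msk receive queue and wake up readers */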
+       mptcp_data_lock(sk);
+
+       mptcp_set_owner_r(skb, sk);
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+
+       sk->sk_data_ready(sk);
+
+       mptcp_data_unlock(sk);
+}
+
 void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
                                   const struct mptcp_options_received *mp_opt)
 {
 
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
 void mptcp_cancel_work(struct sock *sk);
+void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
 
 bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
                           const struct mptcp_addr_info *b, bool use_port);
 
 void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
                                   const struct mptcp_options_received *mp_opt);
+void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+                                             struct request_sock *req);
 
 static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
 {
 
        return NULL;
 }
 
+static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
+                               struct tcp_fastopen_cookie *foc,
+                               enum tcp_synack_type synack_type)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       /* clear tstamp_ok as needed, depending on the cookie */
+       if (foc && foc->len > -1)
+               ireq->tstamp_ok = 0;
+
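+       /* for a fastopen SYN-ACK, relay the TFO data up to the MPTCP socket */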
+       if (synack_type == TCP_SYNACK_FASTOPEN)
+               mptcp_fastopen_subflow_synack_set_params(subflow, req);
+}
+
+static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+                                 struct flowi *fl,
+                                 struct request_sock *req,
+                                 struct tcp_fastopen_cookie *foc,
+                                 enum tcp_synack_type synack_type,
+                                 struct sk_buff *syn_skb)
+{
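+       /* adjust MPTCP-specific request state before the TCP SYN-ACK is built */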
+       subflow_prep_synack(sk, req, foc, synack_type);
+
+       return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
+                                                    synack_type, syn_skb);
+}
+
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
+                                 struct flowi *fl,
+                                 struct request_sock *req,
+                                 struct tcp_fastopen_cookie *foc,
+                                 enum tcp_synack_type synack_type,
+                                 struct sk_buff *syn_skb)
+{
+       subflow_prep_synack(sk, req, foc, synack_type);
+
+       return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
+                                                    synack_type, syn_skb);
+}
+
 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct flowi *fl,
 
        subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
        subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
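+       /* hook send_synack() to handle the MPTCP fastopen specific tweaks */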
+       subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
 
        subflow_specific = ipv4_specific;
        subflow_specific.conn_request = subflow_v4_conn_request;
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
        subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
+       subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
 
        subflow_v6_specific = ipv6_specific;
        subflow_v6_specific.conn_request = subflow_v6_conn_request;