u8 ws;                  /* window scale, 0 to disable */
        u8 num_sack_blocks;     /* number of SACK blocks to include */
        u8 hash_size;           /* bytes in hash_location */
+       u8 bpf_opt_len;         /* length of BPF hdr option */
        __u8 *hash_location;    /* temporary pointer, overloaded */
        __u32 tsval, tsecr;     /* need to include OPTION_TS */
        struct tcp_fastopen_cookie *fastopen_cookie;    /* Fast open cookie */
 #endif
 }
 
+#ifdef CONFIG_CGROUP_BPF
+/* req, syn_skb and synack_type are used when writing synack */
+static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
+                                 struct request_sock *req,
+                                 struct sk_buff *syn_skb,
+                                 enum tcp_synack_type synack_type,
+                                 struct tcp_out_options *opts,
+                                 unsigned int *remaining)
+{
+       /* Fast-path out when no sockops BPF prog has asked to write TCP
+        * header options for this socket, or when no option space is
+        * left in this skb (*remaining == 0).
+        */
+       if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
+                                          BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
+           !*remaining)
+               return;
+
+       /* The bpf running context preparation and the actual bpf prog
+        * calling will be implemented in a later PATCH together with
+        * other bpf pieces.
+        */
+}
+
+/* Let a sockops BPF prog write the header option bytes it reserved
+ * earlier (opts->bpf_opt_len, see struct tcp_out_options).  Callers
+ * invoke this last, after all kernel-written TCP options.
+ */
+static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
+                                   struct request_sock *req,
+                                   struct sk_buff *syn_skb,
+                                   enum tcp_synack_type synack_type,
+                                   struct tcp_out_options *opts)
+{
+       /* Nothing to do unless option space was reserved for BPF. */
+       if (likely(!opts->bpf_opt_len))
+               return;
+
+       /* The bpf running context preparation and the actual bpf prog
+        * calling will be implemented in a later PATCH together with
+        * other bpf pieces.
+        */
+}
+#else
+/* !CONFIG_CGROUP_BPF: no sockops progs can run, so this is a no-op
+ * stub that the compiler can optimize away at every call site.
+ */
+static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
+                                 struct request_sock *req,
+                                 struct sk_buff *syn_skb,
+                                 enum tcp_synack_type synack_type,
+                                 struct tcp_out_options *opts,
+                                 unsigned int *remaining)
+{
+}
+
+/* !CONFIG_CGROUP_BPF: no-op stub; no BPF header options to write. */
+static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
+                                   struct request_sock *req,
+                                   struct sk_buff *syn_skb,
+                                   enum tcp_synack_type synack_type,
+                                   struct tcp_out_options *opts)
+{
+}
+#endif
+
 /* Write previously computed TCP options to the packet.
  *
  * Beware: Something in the Internet is very sensitive to the ordering of
                }
        }
 
+       bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
+
        return MAX_TCP_OPTION_SPACE - remaining;
 }
 
                                       struct tcp_out_options *opts,
                                       const struct tcp_md5sig_key *md5,
                                       struct tcp_fastopen_cookie *foc,
-                                      enum tcp_synack_type synack_type)
+                                      enum tcp_synack_type synack_type,
+                                      struct sk_buff *syn_skb)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
 
        smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 
+       bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
+                             synack_type, opts, &remaining);
+
        return MAX_TCP_OPTION_SPACE - remaining;
 }
 
                        opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
        }
 
+       if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
+                                           BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
+               unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
+
+               bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
+
+               size = MAX_TCP_OPTION_SPACE - remaining;
+       }
+
        return size;
 }
 
        }
 #endif
 
+       /* BPF prog is the last one writing header option */
+       bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
+
        INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
                           tcp_v6_send_check, tcp_v4_send_check,
                           sk, skb);
 }
 
 /**
- * tcp_make_synack - Prepare a SYN-ACK.
- * sk: listener socket
- * dst: dst entry attached to the SYNACK
- * req: request_sock pointer
- * foc: cookie for tcp fast open
- * synack_type: Type of synback to prepare
- *
- * Allocate one skb and build a SYNACK packet.
- * @dst is consumed : Caller should not use it again.
+ * tcp_make_synack - Allocate one skb and build a SYNACK packet.
+ * @sk: listener socket
+ * @dst: dst entry attached to the SYNACK. It is consumed and caller
+ *       should not use it again.
+ * @req: request_sock pointer
+ * @foc: cookie for tcp fast open
+ * @synack_type: Type of synack to prepare
+ * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
  */
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
-                               enum tcp_synack_type synack_type)
+                               enum tcp_synack_type synack_type,
+                               struct sk_buff *syn_skb)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk);
        md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
        skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
+       /* bpf program will be interested in the tcp_flags */
+       TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-                                            foc, synack_type) + sizeof(*th);
+                                            foc, synack_type,
+                                            syn_skb) + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
        rcu_read_unlock();
 #endif
 
+       bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
+                               synack_type, &opts);
+
        skb->skb_mstamp_ns = now;
        tcp_add_tx_delay(skb, tp);
 
        int res;
 
        tcp_rsk(req)->txhash = net_tx_rndhash();
-       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
+       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
+                                 NULL);
        if (!res) {
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);