Currently, TCP_SKB_CB() is hard-coded in the skmsg code, so it does
not work for non-TCP protocols. We could move these fields to an skb
ext, but that would introduce a memory allocation on the fast path.
Fortunately, we only need one word to store all the information,
because the flags field actually contains just 1 bit, so it can be
packed into the lowest bit of the "pointer", which is stored as an
unsigned long.
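This works because a struct sock pointer is at least word-aligned, so
its lowest bit is always zero and free to carry the ingress flag. A
minimal sketch of the encoding, assuming BPF_F_INGRESS is bit 0 (the
local variable names are illustrative, not from the patch):

	/* Pack: bit 0 of a word-aligned pointer is always zero. */
	unsigned long val = (unsigned long)sk;
	if (ingress)
		val |= BPF_F_INGRESS;	/* BPF_F_INGRESS == 1 */

	/* Unpack: mask off the flag bit to recover the pointer. */
	struct sock *sk_out = (struct sock *)(val & ~BPF_F_INGRESS);
	bool ingress_out = val & BPF_F_INGRESS;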
Inside struct sk_buff, '_skb_refdst' can be reused because the skb dst
is no longer needed after ->sk_data_ready(), so we can simply drop it.
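Since '_sk_redir' shares a union with '_skb_refdst' (see the first
hunk below), the dst reference must be released via skb_dst_drop()
before '_sk_redir' is written, otherwise the refcounted dst entry
would leak. Simplified from struct sk_buff, the shared storage looks
roughly like:

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	#ifdef CONFIG_NET_SOCK_MSG
		unsigned long	_sk_redir;	/* aliases _skb_refdst */
	#endif
	};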
Signed-off-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
Link: https://lore.kernel.org/bpf/20210223184934.6054-5-xiyou.wangcong@gmail.com
                        void            (*destructor)(struct sk_buff *skb);
                };
                struct list_head        tcp_tsorted_anchor;
+#ifdef CONFIG_NET_SOCK_MSG
+               unsigned long           _sk_redir;
+#endif
        };
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 
                return false;
        return !!psock->saved_data_ready;
 }
+
+#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
+
+/* We only have one bit so far. */
+#define BPF_F_PTR_MASK ~(BPF_F_INGRESS)
+
+static inline bool skb_bpf_ingress(const struct sk_buff *skb)
+{
+       unsigned long sk_redir = skb->_sk_redir;
+
+       return sk_redir & BPF_F_INGRESS;
+}
+
+static inline void skb_bpf_set_ingress(struct sk_buff *skb)
+{
+       skb->_sk_redir |= BPF_F_INGRESS;
+}
+
+static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
+                                    bool ingress)
+{
+       skb->_sk_redir = (unsigned long)sk_redir;
+       if (ingress)
+               skb->_sk_redir |= BPF_F_INGRESS;
+}
+
+static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
+{
+       unsigned long sk_redir = skb->_sk_redir;
+
+       return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
+}
+
+static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
+{
+       skb->_sk_redir = 0;
+}
+#endif /* CONFIG_NET_SOCK_MSG */
 #endif /* _LINUX_SKMSG_H */
 
                        struct inet6_skb_parm   h6;
 #endif
                } header;       /* For incoming skbs */
-               struct {
-                       __u32 flags;
-                       struct sock *sk_redir;
-               } bpf;
        };
 };
 
 #define TCP_SKB_CB(__skb)      ((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
-static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
-{
-       return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
-}
-
-static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
-{
-       return TCP_SKB_CB(skb)->bpf.sk_redir;
-}
-
-static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
-{
-       TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
-}
-
 extern const struct inet_connection_sock_af_ops ipv4_specific;
 
 #if IS_ENABLED(CONFIG_IPV6)
 
                len = skb->len;
                off = 0;
 start:
-               ingress = tcp_skb_bpf_ingress(skb);
+               ingress = skb_bpf_ingress(skb);
+               skb_bpf_redirect_clear(skb);
                do {
                        ret = -EIO;
                        if (likely(psock->sk->sk_socket))
 
 static void sk_psock_zap_ingress(struct sk_psock *psock)
 {
-       __skb_queue_purge(&psock->ingress_skb);
+       struct sk_buff *skb;
+
+       while ((skb = __skb_dequeue(&psock->ingress_skb)) != NULL) {
+               skb_bpf_redirect_clear(skb);
+               kfree_skb(skb);
+       }
        __sk_psock_purge_ingress_msg(psock);
 }
 
        struct sk_psock *psock_other;
        struct sock *sk_other;
 
-       sk_other = tcp_skb_bpf_redirect_fetch(skb);
+       sk_other = skb_bpf_redirect_fetch(skb);
        /* This error is a buggy BPF program, it returned a redirect
         * return code, but then didn't set a redirect interface.
         */
                 * TLS context.
                 */
                skb->sk = psock->sk;
-               tcp_skb_bpf_redirect_clear(skb);
+               skb_dst_drop(skb);
+               skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
-               ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_tls_verdict_apply(skb, psock->sk, ret);
 static void sk_psock_verdict_apply(struct sk_psock *psock,
                                   struct sk_buff *skb, int verdict)
 {
-       struct tcp_skb_cb *tcp;
        struct sock *sk_other;
        int err = -EIO;
 
                        goto out_free;
                }
 
-               tcp = TCP_SKB_CB(skb);
-               tcp->bpf.flags |= BPF_F_INGRESS;
+               skb_bpf_set_ingress(skb);
 
                /* If the queue is empty then we can submit directly
                 * into the msg queue. If its not empty we have to
        skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
-               tcp_skb_bpf_redirect_clear(skb);
+               skb_dst_drop(skb);
+               skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
-               ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
        skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
-               tcp_skb_bpf_redirect_clear(skb);
+               skb_dst_drop(skb);
+               skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
-               ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
 
 BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
           struct bpf_map *, map, u32, key, u64, flags)
 {
-       struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
        struct sock *sk;
 
        if (unlikely(flags & ~(BPF_F_INGRESS)))
        if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
                return SK_DROP;
 
-       tcb->bpf.flags = flags;
-       tcb->bpf.sk_redir = sk;
+       skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
        return SK_PASS;
 }
 
 BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
           struct bpf_map *, map, void *, key, u64, flags)
 {
-       struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
        struct sock *sk;
 
        if (unlikely(flags & ~(BPF_F_INGRESS)))
        if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
                return SK_DROP;
 
-       tcb->bpf.flags = flags;
-       tcb->bpf.sk_redir = sk;
+       skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
        return SK_PASS;
 }
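
For reference, the flags consumed by these helpers are set from an
sk_skb verdict program via bpf_sk_redirect_map()/bpf_sk_redirect_hash().
A minimal, illustrative sketch (the map name, section name, and key are
assumptions, not part of this patch):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_SOCKMAP);
		__uint(max_entries, 2);
		__type(key, __u32);
		__type(value, __u64);
	} sock_map SEC(".maps");

	SEC("sk_skb/stream_verdict")
	int verdict_prog(struct __sk_buff *skb)
	{
		/* Redirect to the socket at key 0; BPF_F_INGRESS selects
		 * the ingress path, i.e. the bit this patch packs into
		 * skb->_sk_redir.
		 */
		return bpf_sk_redirect_map(skb, &sock_map, 0, BPF_F_INGRESS);
	}

	char _license[] SEC("license") = "GPL";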