 #include <linux/splice.h>
 #include <linux/in6.h>
 #include <linux/if_packet.h>
+#include <linux/llist.h>
 #include <net/flow.h>
 #include <net/page_pool.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
                };
                struct rb_node          rbnode; /* used in netem, ip4 defrag, and tcp stack */
                struct list_head        list;
+               struct llist_node       ll_node;
        };
 
        union {
 
 #include <linux/indirect_call_wrapper.h>
 #include <linux/atomic.h>
 #include <linux/refcount.h>
+#include <linux/llist.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 #include <net/tcp_states.h>
                struct sk_buff  *head;
                struct sk_buff  *tail;
        } sk_backlog;
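+       /* skbs queued by tcp_eat_recv_skb(), freed later in one batch
+        * by sk_defer_free_flush().
+        */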
+       struct llist_head defer_list;
+
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 
        int                     sk_forward_alloc;
 
 }
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
+void __sk_defer_free_flush(struct sock *sk);
+
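+/* Inline fast path: skip the function call when nothing was deferred. */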
+static inline void sk_defer_free_flush(struct sock *sk)
+{
+       if (llist_empty(&sk->defer_list))
+               return;
+       __sk_defer_free_flush(sk);
+}
+
 int tcp_filter(struct sock *sk, struct sk_buff *skb);
 void tcp_set_state(struct sock *sk, int state);
 void tcp_done(struct sock *sk);
 
                tcp_send_ack(sk);
 }
 
+/* Free every skb that tcp_eat_recv_skb() parked on sk->defer_list. */
+void __sk_defer_free_flush(struct sock *sk)
+{
+       struct llist_node *head;
+       struct sk_buff *skb, *n;
+
+       head = llist_del_all(&sk->defer_list);
+       llist_for_each_entry_safe(skb, n, head, ll_node) {
+               prefetch(n);
+               skb_mark_not_on_list(skb);
+               __kfree_skb(skb);
+       }
+}
+EXPORT_SYMBOL(__sk_defer_free_flush);
+
 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
 {
+       __skb_unlink(skb, &sk->sk_receive_queue);
        if (likely(skb->destructor == sock_rfree)) {
                sock_rfree(skb);
                skb->destructor = NULL;
                skb->sk = NULL;
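+               /* While other skbs are still queued (or already deferred),
+                * park this one on defer_list; sk_defer_free_flush() will
+                * release the whole batch later.
+                */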
+               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+                   !llist_empty(&sk->defer_list)) {
+                       llist_add(&skb->ll_node, &sk->defer_list);
+                       return;
+               }
        }
-       sk_eat_skb(sk, skb);
+       __kfree_skb(skb);
 }
 
 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
                        /* Do not sleep, just process backlog. */
                        __sk_flush_backlog(sk);
                } else {
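+                       /* Flush deferred skbs before waiting for more data. */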
+                       sk_defer_free_flush(sk);
                        sk_wait_data(sk, &timeo, last);
                }
 
        ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss,
                                 &cmsg_flags);
        release_sock(sk);
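+       /* Socket lock is released, free the deferred skbs now. */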
+       sk_defer_free_flush(sk);
 
        if (cmsg_flags && ret >= 0) {
                if (cmsg_flags & TCP_CMSG_TS)
                sk->sk_frag.page = NULL;
                sk->sk_frag.offset = 0;
        }
-
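+       /* Do not leave stale skbs on defer_list after a disconnect. */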
+       sk_defer_free_flush(sk);
        sk_error_report(sk);
        return 0;
 }
                err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
                                                          &zc, &len, err);
                release_sock(sk);
+               sk_defer_free_flush(sk);
                if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
                        goto zerocopy_rcv_cmsg;
                switch (len) {