This reverts the following patches:
- commit 2e05fcae83c4 ("tcp: fix compile error if !CONFIG_SYSCTL")
- commit 4f661542a402 ("tcp: fix zerocopy and notsent_lowat issues")
- commit 472c2e07eef0 ("tcp: add one skb cache for tx")
- commit 8b27dae5a2e8 ("tcp: add one skb cache for rx")
Having a cache of one skb (in each direction) per TCP socket is fragile:
it can cause a significant increase in memory use, and it is not good
enough for high-speed flows anyway, since those need more than one skb.

Instead, we want to add a generic infrastructure with more flexible
per-cpu caches for alien NUMA nodes.
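
As a rough illustration (not part of this patch), the worst case for the
reverted scheme is one pinned skb per direction per socket; the sizes and
socket count below are assumptions chosen only to show the scale:

  /* Back-of-the-envelope estimate of memory the reverted per-socket
   * caches could pin.  All numbers are illustrative assumptions, not
   * values taken from the kernel.
   */
  #include <stdio.h>

  int main(void)
  {
          unsigned long long n_sockets    = 1000 * 1000;   /* busy server     */
          unsigned long long skb_truesize = 2048 + 512;    /* data + overhead */
          /* one cached RX skb plus one cached TX skb per socket */
          unsigned long long pinned = n_sockets * 2 * skb_truesize;

          printf("worst case: ~%llu MiB pinned\n", pinned >> 20);
          return 0;
  }
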
Acked-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 
        in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
        Default: 1000
 
-tcp_rx_skb_cache - BOOLEAN
-       Controls a per TCP socket cache of one skb, that might help
-       performance of some workloads. This might be dangerous
-       on systems with a lot of TCP sockets, since it increases
-       memory usage.
-
-       Default: 0 (disabled)
-
 UDP variables
 =============
 
 
   *    @sk_dst_cache: destination cache
   *    @sk_dst_pending_confirm: need to confirm neighbour
   *    @sk_policy: flow policy
-  *    @sk_rx_skb_cache: cache copy of recently accessed RX skb
   *    @sk_receive_queue: incoming packets
   *    @sk_wmem_alloc: transmit queue bytes committed
   *    @sk_tsq_flags: TCP Small Queues flags
   *    @sk_peek_off: current peek_offset value
   *    @sk_send_head: front of stuff to transmit
   *    @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
-  *    @sk_tx_skb_cache: cache copy of recently accessed TX skb
   *    @sk_security: used by security modules
   *    @sk_mark: generic packet mark
   *    @sk_cgrp_data: cgroup data for this cgroup
        atomic_t                sk_drops;
        int                     sk_rcvlowat;
        struct sk_buff_head     sk_error_queue;
-       struct sk_buff          *sk_rx_skb_cache;
        struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
                struct sk_buff  *sk_send_head;
                struct rb_root  tcp_rtx_queue;
        };
-       struct sk_buff          *sk_tx_skb_cache;
        struct sk_buff_head     sk_write_queue;
        __s32                   sk_peek_off;
        int                     sk_write_pending;
                __sk_mem_reclaim(sk, 1 << 20);
 }
 
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
        sk_wmem_queued_add(sk, -skb->truesize);
        sk_mem_uncharge(sk, skb->truesize);
-       if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
-           !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
-               skb_ext_reset(skb);
-               skb_zcopy_clear(skb, true);
-               sk->sk_tx_skb_cache = skb;
-               return;
-       }
        __kfree_skb(skb);
 }
 
                           &skb_shinfo(skb)->tskey);
 }
 
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
        __skb_unlink(skb, &sk->sk_receive_queue);
-       if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
-           !sk->sk_rx_skb_cache) {
-               sk->sk_rx_skb_cache = skb;
-               skb_orphan(skb);
-               return;
-       }
        __kfree_skb(skb);
 }
 
 
        struct inet_sock *inet = inet_sk(sk);
 
        __skb_queue_purge(&sk->sk_receive_queue);
-       if (sk->sk_rx_skb_cache) {
-               __kfree_skb(sk->sk_rx_skb_cache);
-               sk->sk_rx_skb_cache = NULL;
-       }
        __skb_queue_purge(&sk->sk_error_queue);
 
        sk_mem_reclaim(sk);
 
                .extra1         = &sysctl_fib_sync_mem_min,
                .extra2         = &sysctl_fib_sync_mem_max,
        },
-       {
-               .procname       = "tcp_rx_skb_cache",
-               .data           = &tcp_rx_skb_cache_key.key,
-               .mode           = 0644,
-               .proc_handler   = proc_do_static_key,
-       },
-       {
-               .procname       = "tcp_tx_skb_cache",
-               .data           = &tcp_tx_skb_cache_key.key,
-               .mode           = 0644,
-               .proc_handler   = proc_do_static_key,
-       },
        { }
 };
 
 
 unsigned long tcp_memory_pressure __read_mostly;
 EXPORT_SYMBOL_GPL(tcp_memory_pressure);
 
-DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
-EXPORT_SYMBOL(tcp_rx_skb_cache_key);
-
-DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-
 void tcp_enter_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 {
        struct sk_buff *skb;
 
-       if (likely(!size)) {
-               skb = sk->sk_tx_skb_cache;
-               if (skb) {
-                       skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
-                       sk->sk_tx_skb_cache = NULL;
-                       pskb_trim(skb, 0);
-                       INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
-                       skb_shinfo(skb)->tx_flags = 0;
-                       memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
-                       return skb;
-               }
-       }
        /* The TCP header must be at least 32-bit aligned.  */
        size = ALIGN(size, 4);
 
                sk_wmem_free_skb(sk, skb);
        }
        tcp_rtx_queue_purge(sk);
-       skb = sk->sk_tx_skb_cache;
-       if (skb) {
-               __kfree_skb(skb);
-               sk->sk_tx_skb_cache = NULL;
-       }
        INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
 
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
-       if (sk->sk_rx_skb_cache) {
-               __kfree_skb(sk->sk_rx_skb_cache);
-               sk->sk_rx_skb_cache = NULL;
-       }
        WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
        tp->urg_data = 0;
        tcp_write_queue_purge(sk);
 
 int tcp_v4_rcv(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb->dev);
-       struct sk_buff *skb_to_free;
        int sdif = inet_sdif(skb);
        int dif = inet_iif(skb);
        const struct iphdr *iph;
        tcp_segs_in(tcp_sk(sk), skb);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
-               skb_to_free = sk->sk_rx_skb_cache;
-               sk->sk_rx_skb_cache = NULL;
                ret = tcp_v4_do_rcv(sk, skb);
        } else {
                if (tcp_add_backlog(sk, skb))
                        goto discard_and_relse;
-               skb_to_free = NULL;
        }
        bh_unlock_sock(sk);
-       if (skb_to_free)
-               __kfree_skb(skb_to_free);
 
 put_and_return:
        if (refcounted)
 
 
 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 {
-       struct sk_buff *skb_to_free;
        int sdif = inet6_sdif(skb);
        int dif = inet6_iif(skb);
        const struct tcphdr *th;
        tcp_segs_in(tcp_sk(sk), skb);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
-               skb_to_free = sk->sk_rx_skb_cache;
-               sk->sk_rx_skb_cache = NULL;
                ret = tcp_v6_do_rcv(sk, skb);
        } else {
                if (tcp_add_backlog(sk, skb))
                        goto discard_and_relse;
-               skb_to_free = NULL;
        }
        bh_unlock_sock(sk);
-       if (skb_to_free)
-               __kfree_skb(skb_to_free);
 put_and_return:
        if (refcounted)
                sock_put(sk);