www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
tcp: increment sk_drops for dropped rx packets
author: Eric Dumazet <edumazet@google.com>
Fri, 1 Apr 2016 15:52:19 +0000 (08:52 -0700)
committer: Brian Maly <brian.maly@oracle.com>
Fri, 14 Sep 2018 03:14:21 +0000 (23:14 -0400)
Now ss can report sk_drops, we can instruct TCP to increment
this per socket counter when it drops an incoming frame, to refine
monitoring and debugging.

Following patch takes care of listeners drops.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(adapted from v4.9.x commit 532182cd610782db8c18230c2747626562032205)

Orabug: 28639707
CVE: CVE-2018-5390

Signed-off-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
include/net/sock.h
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c

index 71431f99fcfc4be159d11b7d28098714c97efa22..9b37d9e2482b0d904cea5d983c33d5148910eeb6 100644 (file)
@@ -2114,6 +2114,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
        SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
 }
 
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+       int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+       atomic_add(segs, &sk->sk_drops);
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
                           struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
index 3c0f3c28444144f3442e054649adfc6ba0463ad4..0062783d277839ca3c47cfe5ea3e7ed82f2f10ae 100644 (file)
@@ -4231,6 +4231,12 @@ static bool tcp_try_coalesce(struct sock *sk,
        return true;
 }
 
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+       sk_drops_add(sk, skb);
+       __kfree_skb(skb);
+}
+
 /* This one checks to see if we can put data from the
  * out_of_order queue into the receive_queue.
  */
@@ -4255,7 +4261,7 @@ static void tcp_ofo_queue(struct sock *sk)
                __skb_unlink(skb, &tp->out_of_order_queue);
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                        SOCK_DEBUG(sk, "ofo packet was already received\n");
-                       __kfree_skb(skb);
+                       tcp_drop(sk, skb);
                        continue;
                }
                SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
@@ -4307,7 +4313,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
-               __kfree_skb(skb);
+               tcp_drop(sk, skb);
                return;
        }
 
@@ -4371,7 +4377,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                        /* All the bits are present. Drop. */
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-                       __kfree_skb(skb);
+                       tcp_drop(sk, skb);
                        skb = NULL;
                        tcp_dsack_set(sk, seq, end_seq);
                        goto add_sack;
@@ -4410,7 +4416,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                 TCP_SKB_CB(skb1)->end_seq);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-               __kfree_skb(skb1);
+               tcp_drop(sk, skb1);
        }
 
 add_sack:
@@ -4493,12 +4499,13 @@ err:
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int eaten = -1;
        bool fragstolen = false;
+       int eaten = -1;
 
-       if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
-               goto drop;
-
+       if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+               __kfree_skb(skb);
+               return;
+       }
        skb_dst_drop(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
@@ -4578,7 +4585,7 @@ out_of_window:
                tcp_enter_quickack_mode(sk);
                inet_csk_schedule_ack(sk);
 drop:
-               __kfree_skb(skb);
+               tcp_drop(sk, skb);
                return;
        }
 
@@ -5155,7 +5162,7 @@ syn_challenge:
        return true;
 
 discard:
-       __kfree_skb(skb);
+       tcp_drop(sk, skb);
        return false;
 }
 
@@ -5373,7 +5380,7 @@ csum_error:
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
-       __kfree_skb(skb);
+       tcp_drop(sk, skb);
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
@@ -5620,7 +5627,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
 
 discard:
-                       __kfree_skb(skb);
+                       tcp_drop(sk, skb);
                        return 0;
                } else {
                        tcp_send_ack(sk);
@@ -5991,7 +5998,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        if (!queued) {
 discard:
-               __kfree_skb(skb);
+               tcp_drop(sk, skb);
        }
        return 0;
 }
index 735038ffdae1161dc41f30090b1592348b8cc3ce..518c59a8a82b8c1c8b99d589a1632c7bf46af69f 100644 (file)
@@ -1795,6 +1795,7 @@ discard_it:
        return 0;
 
 discard_and_relse:
+       sk_drops_add(sk, skb);
        sock_put(sk);
        goto discard_it;
 
index 3ac0d8c3b00843899468b29584ca63b42973b50d..36706af1e3f8f3916722a9237aca96eb936cf811 100644 (file)
@@ -1573,6 +1573,7 @@ discard_it:
        return 0;
 
 discard_and_relse:
+       sk_drops_add(sk, skb);
        sock_put(sk);
        goto discard_it;