tcp: introduce tcp_under_memory_pressure()
author     Eric Dumazet <edumazet@google.com>
           Fri, 15 May 2015 19:39:27 +0000 (12:39 -0700)
committer  Brian Maly <brian.maly@oracle.com>
           Fri, 14 Sep 2018 03:14:22 +0000 (23:14 -0400)
Introduce an optimized version of sk_under_memory_pressure()
for TCP. Our intent is to use it in fast paths.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(adapted from v4.9.x commit b8da51ebb1aa93908350f95efae73aecbc2e266c)

Orabug: 28639707
CVE: CVE-2018-5390

Signed-off-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
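
Editor's note (context, not part of the commit): in kernels of this vintage, the
generic helper being specialized here looked roughly as sketched below. This is
paraphrased from include/net/sock.h of that era; exact details may differ by tree.

    static inline bool sk_under_memory_pressure(const struct sock *sk)
    {
    	/* Protocols that do not track memory pressure never report it. */
    	if (!sk->sk_prot->memory_pressure)
    		return false;

    	/* Per-memcg state takes precedence when socket accounting is enabled. */
    	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
    		return !!sk->sk_cgrp->memory_pressure;

    	/* Otherwise, the protocol-wide flag, reached through a pointer in sk_prot. */
    	return !!*sk->sk_prot->memory_pressure;
    }

The TCP-specific variant added below can drop the sk_prot indirection and the NULL
check, since TCP always provides tcp_memory_pressure; that is the saving on fast
paths such as tcp_grow_window() and __tcp_select_window().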
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 22c9eb3eea842bb02bc3bf5647dcd13bb26b7ccc..5ef4f70947932ad4866aa72f8ebe74af91caf0f2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return !!sk->sk_cgrp->memory_pressure;
+
+       return tcp_memory_pressure;
+}
 /*
  * The next routines deal with comparing 32 bit unsigned ints
  * and worry about wraparound (automatic with unsigned arithmetic).
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0062783d277839ca3c47cfe5ea3e7ed82f2f10ae..10685876036cd8891ee10a1fdd6daaab45dbc896 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -360,7 +360,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !sk_under_memory_pressure(sk)) {
+           !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -447,7 +447,7 @@ static void tcp_clamp_window(struct sock *sk)
 
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-           !sk_under_memory_pressure(sk) &&
+           !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
@@ -4816,7 +4816,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
-       else if (sk_under_memory_pressure(sk))
+       else if (tcp_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
@@ -4860,7 +4860,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
                return false;
 
        /* If we are under global TCP memory pressure, do not expand.  */
-       if (sk_under_memory_pressure(sk))
+       if (tcp_under_memory_pressure(sk))
                return false;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c420c4e741f7ba5f5921c09c04de83a7ae610703..0c12ca1d2d02aee8088a3ea248230204a0f385bf 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2414,7 +2414,7 @@ u32 __tcp_select_window(struct sock *sk)
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
-               if (sk_under_memory_pressure(sk))
+               if (tcp_under_memory_pressure(sk))
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);
 
@@ -2865,7 +2865,7 @@ void tcp_send_fin(struct sock *sk)
         * Note: in the latter case, FIN packet will be sent after a timeout,
         * as TCP stack thinks it has already been transmitted.
         */
-       if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+       if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
 coalesce:
                TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(tskb)->end_seq++;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8c65dc147d8bcfb58e14c20b774711ffbcc30d5a..111105ca76d08129b9dfa53e5d8283777fb37992 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -247,7 +247,7 @@ void tcp_delack_timer_handler(struct sock *sk)
        }
 
 out:
-       if (sk_under_memory_pressure(sk))
+       if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
 }