www.infradead.org Git - users/willy/linux.git/commitdiff
net-memcg: Introduce mem_cgroup_sk_enabled().
Author: Kuniyuki Iwashima <kuniyu@google.com>
Fri, 15 Aug 2025 20:16:15 +0000 (20:16 +0000)
Committer: Jakub Kicinski <kuba@kernel.org>
Wed, 20 Aug 2025 02:20:59 +0000 (19:20 -0700)
The socket memcg feature is enabled by a static key and
only works for non-root cgroup.

We check both conditions in many places.

Let's factorise it as a helper function.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Link: https://patch.msgid.link/20250815201712.1745332-8-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/proto_memory.h
include/net/sock.h
include/net/tcp.h
net/core/sock.c
net/ipv4/tcp_output.c

index a6ab2f4f5e28a21ab63acb38caf157f6a7415a0b..859e63de81c49e7f4ffc43e6575d7e3a15ca2455 100644 (file)
@@ -31,7 +31,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
        if (!sk->sk_prot->memory_pressure)
                return false;
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+       if (mem_cgroup_sk_enabled(sk) &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;
 
index 811f95ea8d00cee3029432be104a2dad7bec4767..3efdf680401dde1c576f9beec1758fc87fcc4ae8 100644 (file)
@@ -2599,11 +2599,21 @@ static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 {
        return sk->sk_memcg;
 }
+
+static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
+{
+       return mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk);
+}
 #else
 static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
 {
        return NULL;
 }
+
+static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
+{
+       return false;
+}
 #endif
 
 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
index 526a26e7a1505141cd0a06799d76d4b05bf2b7a0..9f01b6be64446ce77864ec287f49beb94ff09444 100644 (file)
@@ -275,7 +275,7 @@ extern unsigned long tcp_memory_pressure;
 /* optimized version of sk_under_memory_pressure() for TCP sockets */
 static inline bool tcp_under_memory_pressure(const struct sock *sk)
 {
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+       if (mem_cgroup_sk_enabled(sk) &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;
 
index 000940ecf360e09f5eda09231bab8b94d7f3189f..ab658fe23e1e633cea314841157d6ff4bb42dc15 100644 (file)
@@ -1032,7 +1032,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
        bool charged;
        int pages;
 
-       if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
+       if (!mem_cgroup_sk_enabled(sk) || !sk_has_account(sk))
                return -EOPNOTSUPP;
 
        if (!bytes)
@@ -3271,7 +3271,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
        sk_memory_allocated_add(sk, amt);
        allocated = sk_memory_allocated(sk);
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg) {
+       if (mem_cgroup_sk_enabled(sk)) {
                memcg = sk->sk_memcg;
                charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge());
                if (!charged)
@@ -3398,7 +3398,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
 {
        sk_memory_allocated_sub(sk, amount);
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+       if (mem_cgroup_sk_enabled(sk))
                mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
 
        if (sk_under_global_memory_pressure(sk) &&
index caf11920a87861955b0d7eeb2c0c03973d6e58ee..37fb320e6f70517176bf56a41dc1a6262a100323 100644 (file)
@@ -3578,7 +3578,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
        sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
        sk_memory_allocated_add(sk, amt);
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+       if (mem_cgroup_sk_enabled(sk))
                mem_cgroup_charge_skmem(sk->sk_memcg, amt,
                                        gfp_memcg_charge() | __GFP_NOFAIL);
 }