www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
memcg: manually uninline __memcg_memory_event
authorShakeel Butt <shakeel.butt@linux.dev>
Tue, 21 Oct 2025 23:44:25 +0000 (16:44 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 22 Oct 2025 01:51:54 +0000 (18:51 -0700)
__memcg_memory_event() has been unnecessarily marked inline even when it
is not really performance critical.  It is usually called to track extreme
conditions.  Over time, it has evolved to include more functionality
and inlining it is causing more harm.

Before the patch:
$ size mm/memcontrol.o net/ipv4/tcp_input.o net/ipv4/tcp_output.o
   text    data     bss     dec     hex filename
  35645   10574    4192   50411    c4eb mm/memcontrol.o
  54738    1658       0   56396    dc4c net/ipv4/tcp_input.o
  34644    1065       0   35709    8b7d net/ipv4/tcp_output.o

After the patch:
$ size mm/memcontrol.o net/ipv4/tcp_input.o net/ipv4/tcp_output.o
   text    data     bss     dec     hex filename
  35137   10446    4192   49775    c26f mm/memcontrol.o
  54322    1562       0   55884    da4c net/ipv4/tcp_input.o
  34492    1017       0   35509    8ab5 net/ipv4/tcp_output.o

Link: https://lkml.kernel.org/r/20251021234425.1885471-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c

index d37e7c93bb8c185eb0dc6373472d3e8679f76d75..8d2e250535a8a87a174249112cad169ac23b373e 100644 (file)
@@ -1002,36 +1002,8 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
        count_memcg_events_mm(mm, idx, 1);
 }
 
-static inline void __memcg_memory_event(struct mem_cgroup *memcg,
-                                       enum memcg_memory_event event,
-                                       bool allow_spinning)
-{
-       bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
-                         event == MEMCG_SWAP_FAIL;
-
-       /* For now only MEMCG_MAX can happen with !allow_spinning context. */
-       VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
-
-       atomic_long_inc(&memcg->memory_events_local[event]);
-       if (!swap_event && allow_spinning)
-               cgroup_file_notify(&memcg->events_local_file);
-
-       do {
-               atomic_long_inc(&memcg->memory_events[event]);
-               if (allow_spinning) {
-                       if (swap_event)
-                               cgroup_file_notify(&memcg->swap_events_file);
-                       else
-                               cgroup_file_notify(&memcg->events_file);
-               }
-
-               if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
-                       break;
-               if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
-                       break;
-       } while ((memcg = parent_mem_cgroup(memcg)) &&
-                !mem_cgroup_is_root(memcg));
-}
+void __memcg_memory_event(struct mem_cgroup *memcg,
+                         enum memcg_memory_event event, bool allow_spinning);
 
 static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
index 1a95049d8b88e458219ea0c72af2d3fe0ee429c7..93f7c76f0ce96009c0ae241359423df7aaa0aca8 100644 (file)
@@ -1626,6 +1626,37 @@ unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
        return page_counter_read(&memcg->memory);
 }
 
+void __memcg_memory_event(struct mem_cgroup *memcg,
+                         enum memcg_memory_event event, bool allow_spinning)
+{
+       bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+                         event == MEMCG_SWAP_FAIL;
+
+       /* For now only MEMCG_MAX can happen with !allow_spinning context. */
+       VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
+
+       atomic_long_inc(&memcg->memory_events_local[event]);
+       if (!swap_event && allow_spinning)
+               cgroup_file_notify(&memcg->events_local_file);
+
+       do {
+               atomic_long_inc(&memcg->memory_events[event]);
+               if (allow_spinning) {
+                       if (swap_event)
+                               cgroup_file_notify(&memcg->swap_events_file);
+                       else
+                               cgroup_file_notify(&memcg->events_file);
+               }
+
+               if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+                       break;
+               if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+                       break;
+       } while ((memcg = parent_mem_cgroup(memcg)) &&
+                !mem_cgroup_is_root(memcg));
+}
+EXPORT_SYMBOL(__memcg_memory_event);
+
 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                     int order)
 {