memcg: rename do_flush_stats and add force flag
author JP Kobryn <inwardvessel@gmail.com>
Tue, 29 Oct 2024 02:11:05 +0000 (19:11 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 1 Nov 2024 04:29:33 +0000 (21:29 -0700)
Patch series "memcg: tracepoint for flushing stats", v3.

This series adds a new capability for understanding the frequency and
circumstances behind flushing memcg stats.

This patch (of 2):

Change the name to something more consistent with other names in the file
and use double underscores to signify that it is associated with the
mem_cgroup_flush_stats() API call.  Additionally, add a new flag that call
sites use to indicate a forced flush: skipping the checks and flushing
unconditionally.  There are no changes in functionality.
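
For illustration, a minimal sketch of the renamed helper and its force
semantics (abridged from the diff below; the flush call at the end of
the function is elided here, as it is in the hunk context):

	static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg,
					     bool force)
	{
		/* Unless forced, skip flushing while the accumulated
		 * stats error is below the flush threshold.
		 */
		if (!force && !memcg_vmstats_needs_flush(memcg->vmstats))
			return;

		if (mem_cgroup_is_root(memcg))
			WRITE_ONCE(flush_last_time, jiffies_64);

		/* ... the flush itself follows, unchanged ... */
	}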

Link: https://lkml.kernel.org/r/20241029021106.25587-1-inwardvessel@gmail.com
Link: https://lkml.kernel.org/r/20241029021106.25587-2-inwardvessel@gmail.com
Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index fbd5f55b3b68c56ed7ae42cdac2c27bed0d43624..4209bfdd0711f406f691a7417b5f3779688c0dce 100644
@@ -594,8 +594,11 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
        }
 }
 
-static void do_flush_stats(struct mem_cgroup *memcg)
+static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
 {
+       if (!force && !memcg_vmstats_needs_flush(memcg->vmstats))
+               return;
+
        if (mem_cgroup_is_root(memcg))
                WRITE_ONCE(flush_last_time, jiffies_64);
 
@@ -619,8 +622,7 @@ void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
        if (!memcg)
                memcg = root_mem_cgroup;
 
-       if (memcg_vmstats_needs_flush(memcg->vmstats))
-               do_flush_stats(memcg);
+       __mem_cgroup_flush_stats(memcg, false);
 }
 
 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
@@ -636,7 +638,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
         * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
         * in latency-sensitive paths is as cheap as possible.
         */
-       do_flush_stats(root_mem_cgroup);
+       __mem_cgroup_flush_stats(root_mem_cgroup, true);
        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
@@ -5291,11 +5293,8 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
                        break;
                }
 
-               /*
-                * mem_cgroup_flush_stats() ignores small changes. Use
-                * do_flush_stats() directly to get accurate stats for charging.
-                */
-               do_flush_stats(memcg);
+               /* Force flush to get accurate stats for charging */
+               __mem_cgroup_flush_stats(memcg, true);
                pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
                if (pages < max)
                        continue;
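
Taken together, the updated call sites follow one convention, sketched
here from the hunks above:

	/* mem_cgroup_flush_stats(): thresholded, may be skipped. */
	__mem_cgroup_flush_stats(memcg, false);

	/* Periodic worker: always flush so that flushing in
	 * latency-sensitive paths stays as cheap as possible.
	 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);

	/* zswap charging: force a flush to get accurate stats. */
	__mem_cgroup_flush_stats(memcg, true);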