Patch series "mm: memcg: page counters optimizations", v3.
This patchset contains 3 independent small optimizations of page counters.
This patch (of 3):
Memory protection (min/low) requires constant tracking of protected memory
usage.  propagate_protected_usage() is called on every page counter update
and performs a number of operations even when the actual memory protection
functionality is not supported (e.g. hugetlb cgroups or memcg swap
counters).
Doing this work unconditionally is inefficient and wastes CPU cycles.  It
can be addressed by calling propagate_protected_usage() only for the
counters that actually support memory guarantees.  As of now that is only
memcg->memory, the unified memory memcg counter.
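The shape of the change (an illustrative sketch only; the authoritative
diff follows below): the caller states at page_counter_init() time whether
the counter needs min/low tracking, and the hot paths test a cached flag
instead of calling propagate_protected_usage() unconditionally:

    /* init: only the unified memory counter tracks protection */
    page_counter_init(&memcg->memory, parent ? &parent->memory : NULL, true);
    page_counter_init(&memcg->swap, parent ? &parent->swap : NULL, false);

    /* charge/uncharge hot path */
    if (track_protection(counter))
            propagate_protected_usage(counter, new);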
Link: https://lkml.kernel.org/r/20240726203110.1577216-2-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
        /* Keep all the read most fields in a separate cacheline. */
        CACHELINE_PADDING(_pad2_);
 
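+       /* Whether memory protection (min/low) is tracked for this counter */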
+       bool protection_support;
        unsigned long min;
        unsigned long low;
        unsigned long high;
 #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
 #endif
 
+/*
+ * Protection is supported only for counters initialized with
+ * protection_support == true; currently that is only memcg->memory.
+ */
 static inline void page_counter_init(struct page_counter *counter,
-                                    struct page_counter *parent)
+                                    struct page_counter *parent,
+                                    bool protection_support)
 {
        atomic_long_set(&counter->usage, 0);
        counter->max = PAGE_COUNTER_MAX;
        counter->parent = parent;
+       counter->protection_support = protection_support;
 }
 
 static inline unsigned long page_counter_read(struct page_counter *counter)
 
                }
                page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
                                                                     idx),
-                                 fault_parent);
+                                 fault_parent, false);
                page_counter_init(
                        hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
-                       rsvd_parent);
+                       rsvd_parent, false);
 
                limit = round_down(PAGE_COUNTER_MAX,
                                   pages_per_huge_page(&hstates[idx]));
 
        if (parent) {
                WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
 
-               page_counter_init(&memcg->memory, &parent->memory);
-               page_counter_init(&memcg->swap, &parent->swap);
+               page_counter_init(&memcg->memory, &parent->memory, true);
+               page_counter_init(&memcg->swap, &parent->swap, false);
 #ifdef CONFIG_MEMCG_V1
                WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
-               page_counter_init(&memcg->kmem, &parent->kmem);
-               page_counter_init(&memcg->tcpmem, &parent->tcpmem);
+               page_counter_init(&memcg->kmem, &parent->kmem, false);
+               page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
 #endif
        } else {
                init_memcg_stats();
                init_memcg_events();
-               page_counter_init(&memcg->memory, NULL);
-               page_counter_init(&memcg->swap, NULL);
+               page_counter_init(&memcg->memory, NULL, true);
+               page_counter_init(&memcg->swap, NULL, false);
 #ifdef CONFIG_MEMCG_V1
-               page_counter_init(&memcg->kmem, NULL);
-               page_counter_init(&memcg->tcpmem, NULL);
+               page_counter_init(&memcg->kmem, NULL, false);
+               page_counter_init(&memcg->tcpmem, NULL, false);
 #endif
                root_mem_cgroup = memcg;
                return &memcg->css;
 
 #include <linux/bug.h>
 #include <asm/page.h>
 
+static bool track_protection(struct page_counter *c)
+{
+       return c->protection_support;
+}
+
 static void propagate_protected_usage(struct page_counter *c,
                                      unsigned long usage)
 {
                new = 0;
                atomic_long_set(&counter->usage, new);
        }
-       propagate_protected_usage(counter, new);
+       if (track_protection(counter))
+               propagate_protected_usage(counter, new);
 }
 
 /**
 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
 {
        struct page_counter *c;
+       bool protection = track_protection(counter);
 
        for (c = counter; c; c = c->parent) {
                long new;
 
                new = atomic_long_add_return(nr_pages, &c->usage);
-               propagate_protected_usage(c, new);
+               if (protection)
+                       propagate_protected_usage(c, new);
                /*
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
                             struct page_counter **fail)
 {
        struct page_counter *c;
+       bool protection = track_protection(counter);
 
        for (c = counter; c; c = c->parent) {
                long new;
                        *fail = c;
                        goto failed;
                }
-               propagate_protected_usage(c, new);
+               if (protection)
+                       propagate_protected_usage(c, new);
+
                /*
                 * Just like with failcnt, we can live with some
                 * inaccuracy in the watermark.