memcg: move v1 events and statistics code to v1 file
author    Shakeel Butt <shakeel.butt@linux.dev>
          Thu, 15 Aug 2024 05:04:50 +0000 (22:04 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 2 Sep 2024 03:26:19 +0000 (20:26 -0700)
Currently the common code paths for charge commit, swapout and batched
uncharge execute v1-only code, which is completely useless for v2
deployments where CONFIG_MEMCG_V1 is disabled.  In addition, this code
mucks with IRQs, which might be slow on some architectures.  Let's move
all of it into v1-only functions and remove it from v2-only deployments.

Link: https://lkml.kernel.org/r/20240815050453.1298138-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: T.J. Mercier <tjmercier@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol-v1.c
mm/memcontrol-v1.h
mm/memcontrol.c
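
[Editor's illustration] The diff below relies on the usual config-gated
stub pattern: when CONFIG_MEMCG_V1 is enabled, memcg1_commit_charge() and
friends are real functions in mm/memcontrol-v1.c; when it is disabled,
mm/memcontrol-v1.h provides empty static inlines, so v2-only builds pay
neither the call nor the IRQ save/restore around it.  The following is a
minimal standalone C sketch of that pattern, not kernel code; every name
in it (FAKE_CONFIG_MEMCG_V1, v1_commit_hook, commit_charge_example) is an
illustrative stand-in rather than an actual kernel identifier.

/*
 * Standalone sketch of the config-gated stub pattern used by this patch.
 * Build with: cc -O2 -o stub-demo stub-demo.c
 */
#include <stdio.h>

#define FAKE_CONFIG_MEMCG_V1 0	/* flip to 1 to emulate a v1-enabled build */

#if FAKE_CONFIG_MEMCG_V1
/* "v1 build": the hook does the legacy bookkeeping. */
static void v1_commit_hook(int nr_pages)
{
	printf("v1 bookkeeping for %d pages\n", nr_pages);
}
#else
/* "v2-only build": empty stub, the compiler drops the call entirely. */
static inline void v1_commit_hook(int nr_pages)
{
}
#endif

/* Common path: stays free of v1-only work when the stub is in effect. */
static void commit_charge_example(int nr_pages)
{
	printf("common commit for %d pages\n", nr_pages);
	v1_commit_hook(nr_pages);
}

int main(void)
{
	commit_charge_example(4);
	return 0;
}

With FAKE_CONFIG_MEMCG_V1 set to 0 the hook call is optimised away, which
mirrors how the memcg1_*() calls vanish from mem_cgroup_commit_charge(),
mem_cgroup_swapout() and uncharge_batch() on CONFIG_MEMCG_V1=n builds.
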

diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 7317f2ce8876c1a82af75dd6bb9371d0da88f8a8..540b45ab4b26283fcb7edeeb9d4da3611071dca6 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -1502,6 +1502,43 @@ void memcg1_check_events(struct mem_cgroup *memcg, int nid)
        }
 }
 
+void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       memcg1_charge_statistics(memcg, folio_nr_pages(folio));
+       memcg1_check_events(memcg, folio_nid(folio));
+       local_irq_restore(flags);
+}
+
+void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
+{
+       /*
+        * Interrupts should be disabled here because the caller holds the
+        * i_pages lock which is taken with interrupts-off. It is
+        * important here to have the interrupts disabled because it is the
+        * only synchronisation we have for updating the per-CPU variables.
+        */
+       preempt_disable_nested();
+       VM_WARN_ON_IRQS_ENABLED();
+       memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
+       preempt_enable_nested();
+       memcg1_check_events(memcg, folio_nid(folio));
+}
+
+void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+                          unsigned long nr_memory, int nid)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __count_memcg_events(memcg, PGPGOUT, pgpgout);
+       __this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
+       memcg1_check_events(memcg, nid);
+       local_irq_restore(flags);
+}
+
 static int compare_thresholds(const void *a, const void *b)
 {
        const struct mem_cgroup_threshold *_a = a;
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index ef72d0b7c5c68807dc2571f53c2b5f2dcffdcf4e..376d021a2bf4d3c6147eada73ee3bf1cd9d570c3 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -118,6 +118,11 @@ void memcg1_oom_recover(struct mem_cgroup *memcg);
 void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
 void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 
+void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
+void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg);
+void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+                          unsigned long nr_memory, int nid);
+
 void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
 
 void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages);
@@ -150,6 +155,15 @@ static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
 static inline void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages) {}
 static inline void memcg1_check_events(struct mem_cgroup *memcg, int nid) {}
 
+static inline void memcg1_commit_charge(struct folio *folio,
+                                       struct mem_cgroup *memcg) {}
+
+static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {}
+
+static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
+                                        unsigned long pgpgout,
+                                        unsigned long nr_memory, int nid) {}
+
 static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
 
 static inline void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages) {}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 059c9e0ba3502683019b66590c7b237ecc93947c..6b4b51caf71bfb7316b27ae2ad2a72e64e2f83f9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2351,11 +2351,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 {
        css_get(&memcg->css);
        commit_charge(folio, memcg);
-
-       local_irq_disable();
-       memcg1_charge_statistics(memcg, folio_nr_pages(folio));
-       memcg1_check_events(memcg, folio_nid(folio));
-       local_irq_enable();
+       memcg1_commit_charge(folio, memcg);
 }
 
 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
@@ -4575,8 +4571,6 @@ static inline void uncharge_gather_clear(struct uncharge_gather *ug)
 
 static void uncharge_batch(const struct uncharge_gather *ug)
 {
-       unsigned long flags;
-
        if (ug->nr_memory) {
                page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
                if (do_memsw_account())
@@ -4588,11 +4582,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
                memcg1_oom_recover(ug->memcg);
        }
 
-       local_irq_save(flags);
-       __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
-       __this_cpu_add(ug->memcg->events_percpu->nr_page_events, ug->nr_memory);
-       memcg1_check_events(ug->memcg, ug->nid);
-       local_irq_restore(flags);
+       memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
 
        /* drop reference from uncharge_folio */
        css_put(&ug->memcg->css);
@@ -4699,7 +4689,6 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 {
        struct mem_cgroup *memcg;
        long nr_pages = folio_nr_pages(new);
-       unsigned long flags;
 
        VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
        VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
@@ -4727,11 +4716,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 
        css_get(&memcg->css);
        commit_charge(new, memcg);
-
-       local_irq_save(flags);
-       memcg1_charge_statistics(memcg, nr_pages);
-       memcg1_check_events(memcg, folio_nid(new));
-       local_irq_restore(flags);
+       memcg1_commit_charge(new, memcg);
 }
 
 /**
@@ -4967,17 +4952,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
                page_counter_uncharge(&memcg->memsw, nr_entries);
        }
 
-       /*
-        * Interrupts should be disabled here because the caller holds the
-        * i_pages lock which is taken with interrupts-off. It is
-        * important here to have the interrupts disabled because it is the
-        * only synchronisation we have for updating the per-CPU variables.
-        */
-       memcg_stats_lock();
-       memcg1_charge_statistics(memcg, -nr_entries);
-       memcg_stats_unlock();
-       memcg1_check_events(memcg, folio_nid(folio));
-
+       memcg1_swapout(folio, memcg);
        css_put(&memcg->css);
 }