mm: separate folio_split_memcg_refs() from split_page_memcg()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 14 Mar 2025 13:36:11 +0000 (13:36 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 22 Mar 2025 05:03:11 +0000 (22:03 -0700)
Patch series "Minor memcg cleanups & prep for memdescs", v2.

Separate the handling of accounted folios and GFP_ACCOUNT pages to make
the code easier to understand.  For more detail, see
https://lore.kernel.org/linux-mm/Z9LwTOudOlCGny3f@casper.infradead.org/

This patch (of 5):

Folios always use memcg_data to refer to the mem_cgroup, while pages
allocated with GFP_ACCOUNT have a pointer to the obj_cgroup.  Since each
caller already knows which kind it has, split the function in two so that
neither needs to check at runtime.
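
As a sketch of the distinction (abridged; MEMCG_DATA_KMEM, __folio_objcg()
and __folio_memcg() are the real names, the surrounding code is only
illustrative):

	/* memcg_data is a tagged pointer; the low bits are flags. */
	if (folio->memcg_data & MEMCG_DATA_KMEM)
		objcg = __folio_objcg(folio);	/* GFP_ACCOUNT allocation */
	else
		memcg = __folio_memcg(folio);	/* regular charged folio */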

Move the assignment of split folio memcg_data to the point where we set up
the other parts of the new folio.  That leaves folio_split_memcg_refs()
just handling the memcg accounting.
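
As a worked example of the accounting that remains: splitting an order-9
folio down to order 0 yields 1 << 9 = 512 folios; the original folio
already holds one css reference, so folio_split_memcg_refs() takes
(1 << (9 - 0)) - 1 = 511 additional references.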

Link: https://lkml.kernel.org/r/20250314133617.138071-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250314133617.138071-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/huge_memory.c
mm/memcontrol.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 57664e2a8fb7b05376495879c2606e8e4b35e427..d090089c549741d38f3c37c0d493cb115cb5842a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1039,6 +1039,8 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 }
 
 void split_page_memcg(struct page *head, int old_order, int new_order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+               unsigned new_order);
 
 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
 {
@@ -1463,6 +1465,11 @@ static inline void split_page_memcg(struct page *head, int old_order, int new_order)
 {
 }
 
+static inline void folio_split_memcg_refs(struct folio *folio,
+               unsigned old_order, unsigned new_order)
+{
+}
+
 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
 {
        return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 10a86b681cf1e982797c9f2c89655aae19f267ba..2a47682d1ab7775d164b44d36839b463bf5d9c6d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3394,6 +3394,9 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
                        folio_set_young(new_folio);
                if (folio_test_idle(folio))
                        folio_set_idle(new_folio);
+#ifdef CONFIG_MEMCG
+               new_folio->memcg_data = folio->memcg_data;
+#endif
 
                folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
        }
@@ -3525,18 +3528,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                        }
                }
 
-               /*
-                * Reset any memcg data overlay in the tail pages.
-                * folio_nr_pages() is unreliable until prep_compound_page()
-                * was called again.
-                */
-#ifdef NR_PAGES_IN_LARGE_FOLIO
-               folio->_nr_pages = 0;
-#endif
-
-
-               /* complete memcg works before add pages to LRU */
-               split_page_memcg(&folio->page, old_order, split_order);
+               folio_split_memcg_refs(folio, old_order, split_order);
                split_page_owner(&folio->page, old_order, split_order);
                pgalloc_tag_split(folio, old_order, split_order);
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ce57660bf5a2c29993b7a24abdecb4c5e1acf58e..f267b309b5b7194fcb96e6b9cff9eb7dbbe97804 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3081,10 +3081,19 @@ void split_page_memcg(struct page *head, int old_order, int new_order)
        for (i = new_nr; i < old_nr; i += new_nr)
                folio_page(folio, i)->memcg_data = folio->memcg_data;
 
-       if (folio_memcg_kmem(folio))
-               obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
-       else
-               css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
+       obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
+}
+
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+               unsigned new_order)
+{
+       unsigned new_refs;
+
+       if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
+               return;
+
+       new_refs = (1 << (old_order - new_order)) - 1;
+       css_get_many(&__folio_memcg(folio)->css, new_refs);
 }
 
 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)