mm/huge_memory: optimize and simplify folio stat update after split
Author:     Wei Yang <richard.weiyang@gmail.com>
AuthorDate: Tue, 14 Oct 2025 13:46:04 +0000 (13:46 +0000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 15 Oct 2025 04:28:55 +0000 (21:28 -0700)
The loop executed after a successful folio split currently combines two
responsibilities:

  * updating statistics for the new folios
  * determining the folio for the next split iteration.

Refactor the logic to calculate and update the folio statistics
directly, eliminating the iteration step.

We can do this because all necessary information is already available:

  * All resulting new folios have the same order, which is @split_order.
  * The exact number of new folios can be calculated directly using
    @old_order and @split_order.
  * The folio for the subsequent split is simply the one containing
    @split_at.

Leveraging this, the stat update can be done more cleanly and
efficiently, without any looping.
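To make the arithmetic concrete, here is a minimal userspace sketch
(plain C, not kernel code; the orders are illustrative, with order 9
standing in for a PMD-sized THP on x86-64) of how each non-uniform
split step produces 1 << (old_order - split_order) new folios of order
@split_order, and how the folio containing @split_at carries over to
the next iteration:

  #include <stdio.h>

  int main(void)
  {
          int new_order = 0;              /* final target order */
          int old_order = 9;              /* starting folio order */
          int split_order;

          /* Non-uniform split: step down one order per iteration. */
          for (split_order = old_order - 1; split_order >= new_order;
               split_order--) {
                  int new_folios = 1 << (old_order - split_order);

                  printf("order %d -> %d folios of order %d\n",
                         old_order, new_folios, split_order);
                  /*
                   * Only the folio containing split_at is split again;
                   * its order is split_order, so it becomes the "old"
                   * folio of the next iteration.
                   */
                  old_order = split_order;
          }
          return 0;
  }

A uniform split is the degenerate case of a single iteration, where
new_folios == 1 << (order - new_order).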

Link: https://lkml.kernel.org/r/20251014134606.22543-4-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b2a48e8e4e08a6dfe68ef00ab160cc3dec090a30..59ca7924dbfbb7d29e96e72c0b39e651ed13c225 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3530,7 +3530,6 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
        bool is_anon = folio_test_anon(folio);
        int order = folio_order(folio);
        int start_order = uniform_split ? new_order : order - 1;
-       struct folio *next;
        int split_order;
 
        folio_clear_has_hwpoisoned(folio);
@@ -3542,9 +3541,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
        for (split_order = start_order;
             split_order >= new_order;
             split_order--) {
-               struct folio *end_folio = folio_next(folio);
                int old_order = folio_order(folio);
-               struct folio *new_folio;
+               int new_folios = 1UL << (old_order - split_order);
 
                /* order-1 anonymous folio is not supported */
                if (is_anon && split_order == 1)
@@ -3573,19 +3571,11 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                pgalloc_tag_split(folio, old_order, split_order);
                __split_folio_to_order(folio, old_order, split_order);
 
-               if (is_anon)
+               if (is_anon) {
                        mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
-               /*
-                * Iterate through after-split folios and update folio stats.
-                */
-               for (new_folio = folio; new_folio != end_folio; new_folio = next) {
-                       next = folio_next(new_folio);
-                       if (new_folio == page_folio(split_at))
-                               folio = new_folio;
-                       if (is_anon)
-                               mod_mthp_stat(folio_order(new_folio),
-                                             MTHP_STAT_NR_ANON, 1);
+                       mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, new_folios);
                }
+               folio = page_folio(split_at);
        }
 
        return 0;
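
As a sanity check on the change above, here is a hedged userspace
sketch showing that the removed per-folio loop and the new batched
mod_mthp_stat() call account the same totals; the nr_anon_* arrays are
hypothetical stand-ins for the kernel's per-order MTHP_STAT_NR_ANON
counters:

  #include <assert.h>
  #include <stdio.h>

  #define MAX_ORDER 10

  /* Hypothetical stand-ins for the per-order NR_ANON counters. */
  static long nr_anon_loop[MAX_ORDER + 1];
  static long nr_anon_batch[MAX_ORDER + 1];

  int main(void)
  {
          int old_order = 9, split_order = 8;
          int new_folios = 1 << (old_order - split_order);
          int i;

          /* Old scheme: iterate over every after-split folio. */
          nr_anon_loop[old_order] -= 1;
          for (i = 0; i < new_folios; i++)
                  nr_anon_loop[split_order] += 1;

          /* New scheme: one batched update, no iteration. */
          nr_anon_batch[old_order] -= 1;
          nr_anon_batch[split_order] += new_folios;

          for (i = 0; i <= MAX_ORDER; i++)
                  assert(nr_anon_loop[i] == nr_anon_batch[i]);
          printf("per-folio loop and batched update agree\n");
          return 0;
  }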