mm/huge_memory: optimize old_order derivation during folio splitting
author Wei Yang <richard.weiyang@gmail.com>
Tue, 14 Oct 2025 13:46:05 +0000 (13:46 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:55 +0000 (21:28 -0700)
Folio splitting requires both the folio's original order (@old_order) and
the new target order (@split_order).
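
(A folio of order n spans 2^n pages, so one split from @old_order down
to @split_order yields 2^(old_order - split_order) folios, matching the
new_folios computation in the code below.)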

In the current implementation, @old_order is repeatedly retrieved using
folio_order().

However, for every iteration after the first, the folio being split is the
result of the previous split, meaning its order is already known to be
equal to the previous iteration's @split_order.
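
For example, in a non-uniform split from order 4 down to order 0, the
folio entering the second iteration is the order-3 result of the first
split, so calling folio_order() there merely re-derives the previous
iteration's @split_order.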

Optimize the logic:

  * Instead of calling folio_order() on every iteration, @old_order is
    read once before the loop and thereafter set directly to the
    previous iteration's @split_order.

  * The initial @split_order is computed directly in the loop
    initializer, which makes the separate @start_order variable
    redundant, so it is removed.

This change avoids unnecessary function calls and simplifies the loop
setup.
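
To see the transformation in isolation, here is a minimal standalone C
sketch of the resulting loop (an illustration only, not the kernel
code: the folio is modeled as nothing more than its order,
folio_order() is a stub, and the actual split work is elided):

  #include <stdio.h>

  /* Stand-in for the kernel's folio_order(): in this model a "folio"
   * is represented purely by its order. */
  static int folio_order(int folio)
  {
          return folio;
  }

  static void split_model(int folio, int new_order, int uniform_split)
  {
          int old_order = folio_order(folio);     /* read once, up front */
          int split_order;

          for (split_order = uniform_split ? new_order : old_order - 1;
               split_order >= new_order;
               split_order--) {
                  int new_folios = 1 << (old_order - split_order);

                  printf("split order-%d folio into %d order-%d folios\n",
                         old_order, new_folios, split_order);

                  /* The folio carried into the next iteration is the
                   * result of this split, so its order is split_order. */
                  old_order = split_order;
          }
  }

  int main(void)
  {
          split_model(4, 0, 0);   /* non-uniform: one order at a time */
          split_model(4, 2, 1);   /* uniform: straight to new_order */
          return 0;
  }

Reading @old_order once and carrying @split_order forward preserves the
loop's arithmetic, because each iteration operates on the folio
produced by the previous split.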

Link: https://lkml.kernel.org/r/20251014134606.22543-5-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 59ca7924dbfbb7d29e96e72c0b39e651ed13c225..cf9a6c505b335bc65197f0deb1e15d4cd61449c2 100644
@@ -3528,8 +3528,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                struct address_space *mapping, bool uniform_split)
 {
        bool is_anon = folio_test_anon(folio);
-       int order = folio_order(folio);
-       int start_order = uniform_split ? new_order : order - 1;
+       int old_order = folio_order(folio);
        int split_order;
 
        folio_clear_has_hwpoisoned(folio);
@@ -3538,10 +3537,9 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
         * split to new_order one order at a time. For uniform split,
         * folio is split to new_order directly.
         */
-       for (split_order = start_order;
+       for (split_order = uniform_split ? new_order : old_order - 1;
             split_order >= new_order;
             split_order--) {
-               int old_order = folio_order(folio);
                int new_folios = 1UL << (old_order - split_order);
 
                /* order-1 anonymous folio is not supported */
@@ -3576,6 +3574,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                        mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, new_folios);
                }
                folio = page_folio(split_at);
+               old_order = split_order;
        }
 
        return 0;