www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/huge_memory: cache folio attribute in __split_unmapped_folio()
author: Wei Yang <richard.weiyang@gmail.com>
Tue, 14 Oct 2025 13:46:02 +0000 (13:46 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:54 +0000 (21:28 -0700)
Patch series "mm/huge_memory: cleanup __split_unmapped_folio()".

This series cleans up and optimizes the internal logic of folio splitting,
particularly focusing on __split_unmapped_folio().

The goal is to improve clarity and efficiency by eliminating redundant
checks, caching stable attribute values, and simplifying the iteration
logic used for updating folio statistics.

These changes make the code easier to follow and maintain.

This patch (of 5):

During the execution of __split_unmapped_folio(), the folio's anon/!anon
attribute is invariant (not expected to change).

Therefore, it is safe and more efficient to retrieve this attribute once
at the start and reuse it throughout the function.

Link: https://lkml.kernel.org/r/20251014134606.22543-1-richard.weiyang@gmail.com
Link: https://lkml.kernel.org/r/20251014134606.22543-2-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index f212b4a6d8fcb6bcd037693ff00912aff2ac600c..5eb104cc90b977c49abb5b90a996c3aca129fd01 100644 (file)
@@ -3527,6 +3527,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                struct page *split_at, struct xa_state *xas,
                struct address_space *mapping, bool uniform_split)
 {
+       bool is_anon = folio_test_anon(folio);
        int order = folio_order(folio);
        int start_order = uniform_split ? new_order : order - 1;
        bool stop_split = false;
@@ -3534,7 +3535,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
        int split_order;
        int ret = 0;
 
-       if (folio_test_anon(folio))
+       if (is_anon)
                mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 
        folio_clear_has_hwpoisoned(folio);
@@ -3551,7 +3552,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                struct folio *new_folio;
 
                /* order-1 anonymous folio is not supported */
-               if (folio_test_anon(folio) && split_order == 1)
+               if (is_anon && split_order == 1)
                        continue;
                if (uniform_split && split_order != new_order)
                        continue;
@@ -3603,7 +3604,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                                if (split_order != new_order && !stop_split)
                                        continue;
                        }
-                       if (folio_test_anon(new_folio))
+                       if (is_anon)
                                mod_mthp_stat(folio_order(new_folio),
                                              MTHP_STAT_NR_ANON, 1);
                }