www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/huge_memory: only get folio_order() once during __folio_split()
author: Wei Yang <richard.weiyang@gmail.com>
Fri, 10 Oct 2025 14:11:42 +0000 (14:11 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:52 +0000 (21:28 -0700)
Before splitting a folio, its order stays the same.

It is only necessary to get folio_order() once.

Also rename order to old_order to represent the original folio order.

Link: https://lkml.kernel.org/r/20251010141142.1349-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: Lance Yang <lance.yang@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Barry Song <baohua@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 068e20711c4363cefb15c6aa381eeb4e1eab91ad..f212b4a6d8fcb6bcd037693ff00912aff2ac600c 100644 (file)
@@ -3701,7 +3701,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        bool is_anon = folio_test_anon(folio);
        struct address_space *mapping = NULL;
        struct anon_vma *anon_vma = NULL;
-       int order = folio_order(folio);
+       int old_order = folio_order(folio);
        struct folio *new_folio, *next;
        int nr_shmem_dropped = 0;
        int remap_flags = 0;
@@ -3715,7 +3715,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        if (folio != page_folio(split_at) || folio != page_folio(lock_at))
                return -EINVAL;
 
-       if (new_order >= folio_order(folio))
+       if (new_order >= old_order)
                return -EINVAL;
 
        if (uniform_split && !uniform_split_supported(folio, new_order, true))
@@ -3787,7 +3787,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
                if (uniform_split) {
                        xas_set_order(&xas, folio->index, new_order);
-                       xas_split_alloc(&xas, folio, folio_order(folio), gfp);
+                       xas_split_alloc(&xas, folio, old_order, gfp);
                        if (xas_error(&xas)) {
                                ret = xas_error(&xas);
                                goto out;
@@ -3843,13 +3843,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                struct lruvec *lruvec;
                int expected_refs;
 
-               if (folio_order(folio) > 1 &&
+               if (old_order > 1 &&
                    !list_empty(&folio->_deferred_list)) {
                        ds_queue->split_queue_len--;
                        if (folio_test_partially_mapped(folio)) {
                                folio_clear_partially_mapped(folio);
-                               mod_mthp_stat(folio_order(folio),
-                                             MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+                               mod_mthp_stat(old_order,
+                                       MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
                        }
                        /*
                         * Reinitialize page_deferred_list after removing the
@@ -3977,7 +3977,7 @@ fail:
        if (!ret && is_anon && !folio_is_device_private(folio))
                remap_flags = RMP_USE_SHARED_ZEROPAGE;
 
-       remap_page(folio, 1 << order, remap_flags);
+       remap_page(folio, 1 << old_order, remap_flags);
 
        /*
         * Unlock all after-split folios except the one containing
@@ -4008,9 +4008,9 @@ out_unlock:
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       if (order == HPAGE_PMD_ORDER)
+       if (old_order == HPAGE_PMD_ORDER)
                count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
-       count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
+       count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
        return ret;
 }