From: Wei Yang
Date: Fri, 10 Oct 2025 14:11:42 +0000 (+0000)
Subject: mm/huge_memory: only get folio_order() once during __folio_split()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=94df3485260f14fd629f7c59a805597ee788bb42;p=users%2Fjedix%2Flinux-maple.git

mm/huge_memory: only get folio_order() once during __folio_split()

Before splitting a folio, its order stays the same, so it is only
necessary to get folio_order() once.  Also rename order to old_order to
represent the original folio order.

Link: https://lkml.kernel.org/r/20251010141142.1349-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang
Acked-by: Lance Yang
Acked-by: David Hildenbrand
Cc: Zi Yan
Cc: Dev Jain
Cc: Lorenzo Stoakes
Cc: Baolin Wang
Cc: "Liam R. Howlett"
Cc: Nico Pache
Cc: Ryan Roberts
Cc: Barry Song
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 068e20711c43..f212b4a6d8fc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3701,7 +3701,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
 	struct anon_vma *anon_vma = NULL;
-	int order = folio_order(folio);
+	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
 	int remap_flags = 0;
@@ -3715,7 +3715,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
 		return -EINVAL;
 
-	if (new_order >= folio_order(folio))
+	if (new_order >= old_order)
 		return -EINVAL;
 
 	if (uniform_split && !uniform_split_supported(folio, new_order, true))
@@ -3787,7 +3787,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
 		if (uniform_split) {
 			xas_set_order(&xas, folio->index, new_order);
-			xas_split_alloc(&xas, folio, folio_order(folio), gfp);
+			xas_split_alloc(&xas, folio, old_order, gfp);
 			if (xas_error(&xas)) {
 				ret = xas_error(&xas);
 				goto out;
@@ -3843,13 +3843,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		struct lruvec *lruvec;
 		int expected_refs;
 
-		if (folio_order(folio) > 1 &&
+		if (old_order > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
 			if (folio_test_partially_mapped(folio)) {
 				folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+				mod_mthp_stat(old_order,
+					MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 			}
 			/*
 			 * Reinitialize page_deferred_list after removing the
@@ -3977,7 +3977,7 @@ fail:
 	if (!ret && is_anon && !folio_is_device_private(folio))
 		remap_flags = RMP_USE_SHARED_ZEROPAGE;
 
-	remap_page(folio, 1 << order, remap_flags);
+	remap_page(folio, 1 << old_order, remap_flags);
 
 	/*
 	 * Unlock all after-split folios except the one containing
@@ -4008,9 +4008,9 @@ out_unlock:
 		i_mmap_unlock_read(mapping);
 out:
 	xas_destroy(&xas);
-	if (order == HPAGE_PMD_ORDER)
+	if (old_order == HPAGE_PMD_ORDER)
 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
-	count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
+	count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
 	return ret;
 }
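
For context, a small standalone sketch of the pattern the patch applies (read
the order once into a local, since nothing changes it until the split itself
runs) is given below.  All names in it (fake_folio, fake_folio_order,
fake_split) are hypothetical stand-ins written for illustration, not the real
mm/huge_memory.c API.

/*
 * Standalone sketch, not kernel code: it only models why caching the
 * order in a local is safe.  The folio's order cannot change between
 * the entry checks and the split itself, so repeated lookups would
 * return the same value anyway.  All names here are hypothetical.
 */
#include <stdio.h>

struct fake_folio {
	int order;	/* log2 of the number of base pages in the folio */
};

static int fake_folio_order(const struct fake_folio *f)
{
	return f->order;
}

static int fake_split(struct fake_folio *f, int new_order)
{
	/* Read the order once, as the patch does with old_order. */
	int old_order = fake_folio_order(f);

	if (new_order >= old_order)
		return -1;	/* cannot split to an equal or larger order */

	/* ... the split happens here; only now does the order change ... */
	f->order = new_order;

	/* Post-split accounting still refers to the original order. */
	printf("split %d base pages into pieces of %d\n",
	       1 << old_order, 1 << new_order);
	return 0;
}

int main(void)
{
	struct fake_folio f = { .order = 9 };	/* like a 2MB THP on 4KB pages */

	return fake_split(&f, 0) ? 1 : 0;
}

As the diff shows, the post-split users in __folio_split() (remap_page() and
the split statistics) already relied on the cached value, since by that point
the folio has been split and folio_order() would report the new order; the
patch makes the remaining pre-split call sites use the same local and renames
it to old_order.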