From d560b485d61d520c44fec9d08602dae94d79a1f6 Mon Sep 17 00:00:00 2001
From: Wei Yang <richard.weiyang@gmail.com>
Date: Tue, 14 Oct 2025 13:46:03 +0000
Subject: [PATCH] mm/huge_memory: update folio stat after successful split

When a folio is successfully split, its statistics must be updated. The
current implementation complicates this process:

* It iterates over the resulting new folios.
* It uses a flag (@stop_split) to conditionally skip updating the stat
  for the folio at @split_at during the loop.
* It then attempts to update the skipped stat on a subsequent failure
  path.

This logic is unnecessarily hard to follow.

This commit refactors the code to update the folio statistics only
after a successful split. This makes the logic much cleaner and sets
the stage for further simplification of the stat-handling code.

Link: https://lkml.kernel.org/r/20251014134606.22543-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Zi Yan
Cc: Baolin Wang
Cc: Barry Song
Cc: David Hildenbrand
Cc: Dev Jain
Cc: Lance Yang
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Mariano Pache
Cc: Ryan Roberts
Signed-off-by: Andrew Morton
---
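As a sanity check on the accounting invariant the refactor relies on:
at each successful split step the folio at old_order is gone and
2^(old_order - split_order) folios of split_order take its place, so
adjusting the counters immediately after the split keeps them exact
with no deferred fixup. Below is a minimal user-space sketch of that
arithmetic; nr_anon[] and split_stats() are illustrative stand-ins,
not the kernel's mod_mthp_stat() machinery:

#include <stdio.h>

#define MAX_ORDER	10

static long nr_anon[MAX_ORDER + 1];	/* per-order folio counters */

/* Peel one order at a time, as the non-uniform split loop does. */
static void split_stats(int order, int new_order)
{
	int split_order;

	for (split_order = order - 1; split_order >= new_order; split_order--) {
		int old_order = split_order + 1;

		/* the folio just split no longer exists at old_order... */
		nr_anon[old_order] -= 1;
		/* ...and its pages now form folios of split_order */
		nr_anon[split_order] += 1 << (old_order - split_order);
	}
}

int main(void)
{
	int o;

	nr_anon[4] = 1;		/* one anonymous order-4 folio */
	split_stats(4, 0);	/* non-uniform split down to order 0 */

	for (o = 4; o >= 0; o--)
		printf("order %d: %ld\n", o, nr_anon[o]);
	/* prints 0 1 1 1 2: one folio each at orders 3..1, two at order 0 */
	return 0;
}

The refactored kernel loop performs the same arithmetic via
mod_mthp_stat() on old_order and on each after-split folio, which is
why no upfront decrement or failure-path fixup is needed.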
 mm/huge_memory.c | 44 +++++++++++---------------------------------
 1 file changed, 11 insertions(+), 33 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5eb104cc90b9..b2a48e8e4e08 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3530,13 +3530,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
-	bool stop_split = false;
 	struct folio *next;
 	int split_order;
-	int ret = 0;
-
-	if (is_anon)
-		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 
 	folio_clear_has_hwpoisoned(folio);
 
@@ -3545,7 +3540,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	 * folio is split to new_order directly.
 	 */
 	for (split_order = start_order;
-	     split_order >= new_order && !stop_split;
+	     split_order >= new_order;
 	     split_order--) {
 		struct folio *end_folio = folio_next(folio);
 		int old_order = folio_order(folio);
@@ -3568,49 +3563,32 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 			else {
 				xas_set_order(xas, folio->index, split_order);
 				xas_try_split(xas, folio, old_order);
-				if (xas_error(xas)) {
-					ret = xas_error(xas);
-					stop_split = true;
-				}
+				if (xas_error(xas))
+					return xas_error(xas);
 			}
 		}
 
-		if (!stop_split) {
-			folio_split_memcg_refs(folio, old_order, split_order);
-			split_page_owner(&folio->page, old_order, split_order);
-			pgalloc_tag_split(folio, old_order, split_order);
-
-			__split_folio_to_order(folio, old_order, split_order);
-		}
+		folio_split_memcg_refs(folio, old_order, split_order);
+		split_page_owner(&folio->page, old_order, split_order);
+		pgalloc_tag_split(folio, old_order, split_order);
+		__split_folio_to_order(folio, old_order, split_order);
+		if (is_anon)
+			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
 
 		/*
 		 * Iterate through after-split folios and update folio stats.
-		 * But in buddy allocator like split, the folio
-		 * containing the specified page is skipped until its order
-		 * is new_order, since the folio will be worked on in next
-		 * iteration.
 		 */
 		for (new_folio = folio; new_folio != end_folio; new_folio = next) {
 			next = folio_next(new_folio);
-			/*
-			 * for buddy allocator like split, new_folio containing
-			 * @split_at page could be split again, thus do not
-			 * change stats yet. Wait until new_folio's order is
-			 * @new_order or stop_split is set to true by the above
-			 * xas_split() failure.
-			 */
-			if (new_folio == page_folio(split_at)) {
+			if (new_folio == page_folio(split_at))
 				folio = new_folio;
-				if (split_order != new_order && !stop_split)
-					continue;
-			}
 			if (is_anon)
 				mod_mthp_stat(folio_order(new_folio),
 					      MTHP_STAT_NR_ANON, 1);
 		}
 	}
 
-	return ret;
+	return 0;
 }
 
 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-- 
2.51.0