From: Kefeng Wang <wangkefeng.wang@huawei.com>
Date: Wed, 10 Sep 2025 13:39:54 +0000 (+0800)
Subject: mm: hugetlb: convert to use more alloc_fresh_hugetlb_folio()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=424d7436d5a1869d0507d76684c865ef3048c1d7;p=users%2Fjedix%2Flinux-maple.git

mm: hugetlb: convert to use more alloc_fresh_hugetlb_folio()

Patch series "mm: hugetlb: cleanup hugetlb folio allocation", v3.

Some cleanups for hugetlb folio allocation.

This patch (of 3):

Simplify alloc_fresh_hugetlb_folio() and convert more functions to use
it, which lets us remove prep_new_hugetlb_folio() and
__prep_new_hugetlb_folio().

Link: https://lkml.kernel.org/r/20250910133958.301467-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20250910133958.301467-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador
Reviewed-by: Sidhartha Kumar
Reviewed-by: Zi Yan
Cc: Brendan Jackman
Cc: David Hildenbrand
Cc: Jane Chu
Cc: Johannes Weiner
Cc: Muchun Song
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc405b8b118a..727fbe7fd0a8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1906,20 +1906,6 @@ static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 	set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
-static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
-{
-	init_new_hugetlb_folio(h, folio);
-	hugetlb_vmemmap_optimize_folio(h, folio);
-}
-
-static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
-{
-	__prep_new_hugetlb_folio(h, folio);
-	spin_lock_irq(&hugetlb_lock);
-	__prep_account_new_huge_page(h, nid);
-	spin_unlock_irq(&hugetlb_lock);
-}
-
 /*
  * Find and lock address space (mapping) in write mode.
  *
@@ -2005,25 +1991,20 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
 }
 
 /*
- * Common helper to allocate a fresh hugetlb page. All specific allocators
- * should use this function to get new hugetlb pages
+ * Common helper to allocate a fresh hugetlb folio. All specific allocators
+ * should use this function to get new hugetlb folio
  *
- * Note that returned page is 'frozen': ref count of head page and all tail
- * pages is zero.
+ * Note that returned folio is 'frozen': ref count of head page and all tail
+ * pages is zero, and the accounting must be done in the caller.
  */
 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
 		gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
 	struct folio *folio;
 
-	if (hstate_is_gigantic(h))
-		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
-	else
-		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
-	if (!folio)
-		return NULL;
-
-	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
+	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+	if (folio)
+		hugetlb_vmemmap_optimize_folio(h, folio);
 
 	return folio;
 }
 
@@ -2241,12 +2222,10 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 		goto out_unlock;
 	spin_unlock_irq(&hugetlb_lock);
 
-	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
 	if (!folio)
 		return NULL;
 
-	hugetlb_vmemmap_optimize_folio(h, folio);
-
 	spin_lock_irq(&hugetlb_lock);
 	/*
 	 * nr_huge_pages needs to be adjusted within the same lock cycle
@@ -2290,6 +2269,10 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 	if (!folio)
 		return NULL;
 
+	spin_lock_irq(&hugetlb_lock);
+	__prep_account_new_huge_page(h, folio_nid(folio));
+	spin_unlock_irq(&hugetlb_lock);
+
 	/* fresh huge pages are frozen */
 	folio_ref_unfreeze(folio, 1);
 	/*
@@ -2836,11 +2819,10 @@ retry:
 	if (!new_folio) {
 		spin_unlock_irq(&hugetlb_lock);
 		gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-		new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
-						      NULL, NULL);
+		new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
+						      nid, NULL);
 		if (!new_folio)
 			return -ENOMEM;
-		__prep_new_hugetlb_folio(h, new_folio);
 		goto retry;
 	}
 
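
For quick reference, a condensed sketch of the resulting allocation path, distilled from the hunks above (simplified: the gigantic-page handling inside only_alloc_fresh_hugetlb_folio(), error paths, and the surrounding surplus bookkeeping are not shown in full):

/*
 * After this patch the common helper only allocates the frozen folio and
 * applies the vmemmap optimization; it no longer touches hugetlb_lock or
 * the per-node counters.
 */
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio;

	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (folio)
		hugetlb_vmemmap_optimize_folio(h, folio);

	return folio;
}

/*
 * Callers that previously relied on prep_new_hugetlb_folio() now do the
 * accounting themselves, e.g. in alloc_migrate_hugetlb_folio():
 */
	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, folio_nid(folio));
	spin_unlock_irq(&hugetlb_lock);

	/* fresh huge pages are frozen */
	folio_ref_unfreeze(folio, 1);

Keeping the accounting out of the common helper also lets alloc_surplus_hugetlb_folio() adjust nr_huge_pages within the same hugetlb_lock cycle as its surplus bookkeeping, which is why the helper now only allocates and runs the vmemmap optimization.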