mm: hugetlb: convert to use more alloc_fresh_hugetlb_folio()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Wed, 10 Sep 2025 13:39:54 +0000 (21:39 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:40 +0000 (17:25 -0700)
Patch series "mm: hugetlb: cleanup hugetlb folio allocation", v3.

Some cleanups for hugetlb folio allocation.

This patch (of 3):

Simplify alloc_fresh_hugetlb_folio() and convert more functions to use it,
which allows us to remove prep_new_hugetlb_folio() and
__prep_new_hugetlb_folio().
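
For illustration, the caller-side pattern after this change looks roughly
like the sketch below (example_alloc_and_account() is a hypothetical name
used only for this example; the real callers, e.g.
alloc_surplus_hugetlb_folio() and alloc_migrate_hugetlb_folio(), are shown
in the hunks that follow): allocation and vmemmap optimization happen
inside alloc_fresh_hugetlb_folio(), while per-node accounting under
hugetlb_lock and the refcount unfreeze remain the caller's job.

	static struct folio *example_alloc_and_account(struct hstate *h,
			gfp_t gfp_mask, int nid, nodemask_t *nmask)
	{
		struct folio *folio;

		/* allocation + vmemmap optimization; folio is returned frozen */
		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
		if (!folio)
			return NULL;

		/* per-node accounting is now done by the caller */
		spin_lock_irq(&hugetlb_lock);
		__prep_account_new_huge_page(h, folio_nid(folio));
		spin_unlock_irq(&hugetlb_lock);

		/* fresh hugetlb folios are frozen; take the first reference */
		folio_ref_unfreeze(folio, 1);
		return folio;
	}

This mirrors the alloc_migrate_hugetlb_folio() hunk below; other callers
may order the accounting differently within their own lock cycles.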

Link: https://lkml.kernel.org/r/20250910133958.301467-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20250910133958.301467-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index cc405b8b118adea58b31da0ed69e465eb97d63e1..727fbe7fd0a899e3d06f71aee41ba1f86e5074a7 100644
@@ -1906,20 +1906,6 @@ static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
        set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
-static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
-{
-       init_new_hugetlb_folio(h, folio);
-       hugetlb_vmemmap_optimize_folio(h, folio);
-}
-
-static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
-{
-       __prep_new_hugetlb_folio(h, folio);
-       spin_lock_irq(&hugetlb_lock);
-       __prep_account_new_huge_page(h, nid);
-       spin_unlock_irq(&hugetlb_lock);
-}
-
 /*
  * Find and lock address space (mapping) in write mode.
  *
@@ -2005,25 +1991,20 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
 }
 
 /*
- * Common helper to allocate a fresh hugetlb page. All specific allocators
- * should use this function to get new hugetlb pages
+ * Common helper to allocate a fresh hugetlb folio. All specific allocators
+ * should use this function to get new hugetlb folio
  *
- * Note that returned page is 'frozen':  ref count of head page and all tail
- * pages is zero.
+ * Note that returned folio is 'frozen':  ref count of head page and all tail
+ * pages is zero, and the accounting must be done in the caller.
  */
 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
                gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
        struct folio *folio;
 
-       if (hstate_is_gigantic(h))
-               folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
-       else
-               folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
-       if (!folio)
-               return NULL;
-
-       prep_new_hugetlb_folio(h, folio, folio_nid(folio));
+       folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+       if (folio)
+               hugetlb_vmemmap_optimize_folio(h, folio);
        return folio;
 }
 
@@ -2241,12 +2222,10 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
                goto out_unlock;
        spin_unlock_irq(&hugetlb_lock);
 
-       folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+       folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
        if (!folio)
                return NULL;
 
-       hugetlb_vmemmap_optimize_folio(h, folio);
-
        spin_lock_irq(&hugetlb_lock);
        /*
         * nr_huge_pages needs to be adjusted within the same lock cycle
@@ -2290,6 +2269,10 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
        if (!folio)
                return NULL;
 
+       spin_lock_irq(&hugetlb_lock);
+       __prep_account_new_huge_page(h, folio_nid(folio));
+       spin_unlock_irq(&hugetlb_lock);
+
        /* fresh huge pages are frozen */
        folio_ref_unfreeze(folio, 1);
        /*
@@ -2836,11 +2819,10 @@ retry:
                if (!new_folio) {
                        spin_unlock_irq(&hugetlb_lock);
                        gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-                       new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
-                                                             NULL, NULL);
+                       new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
+                                                             nid, NULL);
                        if (!new_folio)
                                return -ENOMEM;
-                       __prep_new_hugetlb_folio(h, new_folio);
                        goto retry;
                }