www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/hugetlb: retry to allocate for early boot hugepage allocation
authorLi RongQing <lirongqing@baidu.com>
Mon, 1 Sep 2025 08:20:52 +0000 (16:20 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:39 +0000 (17:25 -0700)
In cloud environments with massive hugepage reservations (95%+ of system
RAM), single-attempt allocation during early boot often fails due to
memory pressure.

Commit 91f386bf0772 ("hugetlb: batch freeing of vmemmap pages")
intensified this by deferring page frees, increasing peak memory usage
during allocation.

Introduce a retry mechanism that leverages the memory reclaimed by
vmemmap optimization (~1.6% of hugepage memory) when available.  Upon
initial allocation failure, the system retries until successful or no
further progress is made, ensuring reliable hugepage allocation while
preserving the benefits of batched vmemmap freeing.

Testing on a 256G machine allocating 252G of hugepages:
Before: 128056/129024 hugepages allocated
After:  Successfully allocated all 129024 hugepages

Link: https://lkml.kernel.org/r/20250901082052.3247-1-lirongqing@baidu.com
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Li RongQing <lirongqing@baidu.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 0a1760aa206ba7a6bb1e9530d0a1a1dc47071010..cc405b8b118adea58b31da0ed69e465eb97d63e1 100644 (file)
@@ -3593,10 +3593,9 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 
        unsigned long jiffies_start;
        unsigned long jiffies_end;
+       unsigned long remaining;
 
        job.thread_fn   = hugetlb_pages_alloc_boot_node;
-       job.start       = 0;
-       job.size        = h->max_huge_pages;
 
        /*
         * job.max_threads is 25% of the available cpu threads by default.
@@ -3620,10 +3619,29 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
        }
 
        job.max_threads = hugepage_allocation_threads;
-       job.min_chunk   = h->max_huge_pages / hugepage_allocation_threads;
 
        jiffies_start = jiffies;
-       padata_do_multithreaded(&job);
+       do {
+               remaining = h->max_huge_pages - h->nr_huge_pages;
+
+               job.start     = h->nr_huge_pages;
+               job.size      = remaining;
+               job.min_chunk = remaining / hugepage_allocation_threads;
+               padata_do_multithreaded(&job);
+
+               if (h->nr_huge_pages == h->max_huge_pages)
+                       break;
+
+               /*
+                * Retry only if the vmemmap optimization might have been able to free
+                * some memory back to the system.
+                */
+               if (!hugetlb_vmemmap_optimizable(h))
+                       break;
+
+               /* Continue if progress was made in last iteration */
+       } while (remaining != (h->max_huge_pages - h->nr_huge_pages));
+
        jiffies_end = jiffies;
 
        pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",