www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/hugetlb: allow overcommitting gigantic hugepages
authorUsama Arif <usamaarif642@gmail.com>
Thu, 9 Oct 2025 17:24:31 +0000 (18:24 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:49 +0000 (21:28 -0700)
Currently, gigantic hugepages cannot use the overcommit mechanism
(nr_overcommit_hugepages), forcing users to permanently reserve memory via
nr_hugepages even when pages might not be actively used.

The restriction was added in 2011 [1], which was before there was support
for reserving 1G hugepages at runtime.  Remove this blanket restriction on
gigantic hugepage overcommit.  This will bring the same benefits to
gigantic pages as hugepages:

- Memory is only taken out of regular use when actually needed
- Unused surplus pages can be returned to the system
- Better memory utilization, especially with CMA backing which can
  significantly increase the chances of hugepage allocation

Without this patch:
echo 3 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_overcommit_hugepages
bash: echo: write error: Invalid argument

With this patch:
echo 3 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_overcommit_hugepages
./mmap_hugetlb_test
Successfully allocated huge pages at address: 0x7f9d40000000

cat mmap_hugetlb_test.c
...
    unsigned long ALLOC_SIZE = 3 * (unsigned long) HUGE_PAGE_SIZE;
    addr = mmap(NULL,
                ALLOC_SIZE, // 3GB
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
                -1,
                0);

    if (addr == MAP_FAILED) {
        fprintf(stderr, "mmap failed: %s\n", strerror(errno));
        return 1;
    }
    printf("Successfully allocated huge pages at address: %p\n", addr);
...

Link: https://lkml.kernel.org/r/20251009172433.4158118-2-usamaarif642@gmail.com
Link: https://git.zx2c4.com/linux-rng/commit/mm/hugetlb.c?id=adbe8726dc2a3805630d517270db17e3af86e526 [1]
Signed-off-by: Usama Arif <usamaarif642@gmail.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 614352aa04d67f023a8faa1c3bf7025bffb77b72..86e672fcb305c8ec4a7cf225f619c5effdd3c1a3 100644 (file)
@@ -2243,7 +2243,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 {
        struct folio *folio = NULL;
 
-       if (hstate_is_gigantic(h))
+       if (hstate_is_gigantic_no_runtime(h))
                return NULL;
 
        spin_lock_irq(&hugetlb_lock);
@@ -4305,7 +4305,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj, NULL);
 
-       if (hstate_is_gigantic(h))
+       if (hstate_is_gigantic_no_runtime(h))
                return -EINVAL;
 
        err = kstrtoul(buf, 10, &input);
@@ -5192,7 +5192,7 @@ static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
 
        tmp = h->nr_overcommit_huge_pages;
 
-       if (write && hstate_is_gigantic(h))
+       if (write && hstate_is_gigantic_no_runtime(h))
                return -EINVAL;
 
        ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,