mm/vmalloc: request large order pages from buddy allocator
author     Vishal Moola (Oracle) <vishal.moola@gmail.com>
           Tue, 21 Oct 2025 19:44:56 +0000 (12:44 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 22 Oct 2025 01:51:54 +0000 (18:51 -0700)
Sometimes, vm_area_alloc_pages() will want many pages from the buddy
allocator.  Rather than asking the buddy allocator for at most 100 pages
per request, we can eagerly request large order pages and cover the same
demand with far fewer calls.

We still split the large order pages down to order-0, as the rest of the
vmalloc code (and some callers) depends on order-0 pages.  We still defer
to the bulk allocator and the existing fallback path for order-0 requests
or when a large order allocation fails.
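Condensed, the idea is: grab one high-order block, split_page() it into
order-0 pages, and hand those to the existing vmalloc mapping code.  A
minimal sketch follows; it is illustrative only, the helper name is made
up, and it uses the plain alloc_pages()/alloc_pages_node() wrappers where
the patch itself uses the _noprof variants.  The committed code in the
diff below is authoritative.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Illustrative sketch only, not the committed code. */
static unsigned int alloc_split_high_order(gfp_t gfp, int nid,
					   unsigned int large_order,
					   struct page **pages)
{
	/* Opportunistic attempt: no reclaim, no warnings, no compound page. */
	gfp_t large_gfp = (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL |
				   __GFP_COMP)) | __GFP_NOWARN;
	struct page *page;
	unsigned int i;

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(large_gfp, large_order);
	else
		page = alloc_pages_node(nid, large_gfp, large_order);
	if (!page)
		return 0;

	/* Break the high-order page into 2^large_order order-0 pages... */
	split_page(page, large_order);
	/* ...which are physically contiguous, so record them in order. */
	for (i = 0; i < (1U << large_order); i++)
		pages[i] = page + i;

	return 1U << large_order;
}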

Running 1000 iterations of allocations on a small 4GB system finds:

1000 2MB allocations:
          [Baseline]    [This patch]
real      0m46.310s     0m34.582s
user      0m0.001s      0m0.006s
sys       0m46.058s     0m34.365s

10000 200KB allocations:
          [Baseline]    [This patch]
real      0m56.104s     0m43.696s
user      0m0.001s      0m0.003s
sys       0m55.375s     0m42.995s
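A workload with this shape can be generated with a trivial test module
along the following lines.  This is a hypothetical reproduction sketch,
not the test used to produce the numbers above; the 2MB size corresponds
to the first table only.

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

static int __init vmalloc_bench_init(void)
{
	const unsigned int iters = 1000;
	const size_t size = 2UL << 20;	/* 2MB per allocation */
	ktime_t start = ktime_get();
	unsigned int i;

	/* Time back-to-back vmalloc()/vfree() pairs of a fixed size. */
	for (i = 0; i < iters; i++) {
		void *p = vmalloc(size);

		if (!p)
			return -ENOMEM;
		vfree(p);
	}

	pr_info("vmalloc bench: %u x %zu bytes in %lld us\n",
		iters, size, ktime_us_delta(ktime_get(), start));
	return 0;
}

static void __exit vmalloc_bench_exit(void)
{
}

module_init(vmalloc_bench_init);
module_exit(vmalloc_bench_exit);
MODULE_LICENSE("GPL");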

Link: https://lkml.kernel.org/r/20251021194455.33351-2-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index adde450ddf5e2f86d36efedd1e23db496658f159..0832f944544cbc66b47d2067034782e836d3c675 100644
@@ -3619,8 +3619,44 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
+       unsigned int nr_remaining = nr_pages;
+       unsigned int max_attempt_order = MAX_PAGE_ORDER;
        struct page *page;
        int i;
+       gfp_t large_gfp = (gfp &
+               ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL | __GFP_COMP))
+               | __GFP_NOWARN;
+       unsigned int large_order = ilog2(nr_remaining);
+
+       large_order = min(max_attempt_order, large_order);
+
+       /*
+        * Initially, attempt to have the page allocator give us large order
+        * pages. Do not attempt allocations smaller than order, since
+        * __vmap_pages_range() expects physically contiguous chunks of
+        * exactly 2^order pages.
+        */
+       while (large_order > order && nr_remaining) {
+               if (nid == NUMA_NO_NODE)
+                       page = alloc_pages_noprof(large_gfp, large_order);
+               else
+                       page = alloc_pages_node_noprof(nid, large_gfp, large_order);
+
+               if (unlikely(!page)) {
+                       max_attempt_order = --large_order;
+                       continue;
+               }
+
+               split_page(page, large_order);
+               for (i = 0; i < (1U << large_order); i++)
+                       pages[nr_allocated + i] = page + i;
+
+               nr_allocated += 1U << large_order;
+               nr_remaining = nr_pages - nr_allocated;
+
+               large_order = ilog2(nr_remaining);
+               large_order = min(max_attempt_order, large_order);
+       }
 
        /*
         * For order-0 pages we make use of bulk allocator, if