www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/page_alloc: reject unreasonable folio/compound page sizes in alloc_contig_range_noprof()
author: David Hildenbrand <david@redhat.com>
Mon, 1 Sep 2025 15:03:27 +0000 (17:03 +0200)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:30 +0000 (17:25 -0700)
Let's reject them early, which in turn makes folio_alloc_gigantic() reject
them properly.

To avoid converting from order to nr_pages, let's just add MAX_FOLIO_ORDER
and calculate MAX_FOLIO_NR_PAGES based on that.

While at it, let's just make the order a "const unsigned order".

Link: https://lkml.kernel.org/r/20250901150359.867252-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/page_alloc.c

index 00c8a54127d375fe477f50dc9f13fe24ea55c4c4..77737cbf2216a36428aada0204eb196ddddbe1db 100644 (file)
@@ -2055,11 +2055,13 @@ static inline long folio_nr_pages(const struct folio *folio)
 
 /* Only hugetlbfs can allocate folios larger than MAX_ORDER */
 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-#define MAX_FOLIO_NR_PAGES     (1UL << PUD_ORDER)
+#define MAX_FOLIO_ORDER                PUD_ORDER
 #else
-#define MAX_FOLIO_NR_PAGES     MAX_ORDER_NR_PAGES
+#define MAX_FOLIO_ORDER                MAX_PAGE_ORDER
 #endif
 
+#define MAX_FOLIO_NR_PAGES     (1UL << MAX_FOLIO_ORDER)
+
 /*
  * compound_nr() returns the number of pages in this potentially compound
  * page.  compound_nr() can be called on a tail page, and is defined to
index 0873d640f26cc57d95f91df944d2c940a9dade53..54dbb6f0d14e649a6d63a619d97b922426bc8adf 100644 (file)
@@ -6838,6 +6838,7 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
 int alloc_contig_range_noprof(unsigned long start, unsigned long end,
                              acr_flags_t alloc_flags, gfp_t gfp_mask)
 {
+       const unsigned int order = ilog2(end - start);
        unsigned long outer_start, outer_end;
        int ret = 0;
 
@@ -6855,6 +6856,14 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
                                            PB_ISOLATE_MODE_CMA_ALLOC :
                                            PB_ISOLATE_MODE_OTHER;
 
+       /*
+        * In contrast to the buddy, we allow for orders here that exceed
+        * MAX_PAGE_ORDER, so we must manually make sure that we are not
+        * exceeding the maximum folio order.
+        */
+       if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER))
+               return -EINVAL;
+
        gfp_mask = current_gfp_context(gfp_mask);
        if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
                return -EINVAL;
@@ -6952,7 +6961,6 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
                        free_contig_range(end, outer_end - end);
        } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
                struct page *head = pfn_to_page(start);
-               int order = ilog2(end - start);
 
                check_new_pages(head, order);
                prep_new_page(head, order, gfp_mask, 0);