www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm/page_alloc: Add __alloc_frozen_pages()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 14 Jun 2022 20:47:57 +0000 (16:47 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
Defer the initialisation of the page refcount to the new __alloc_pages()
wrapper and turn the old __alloc_pages() into __alloc_frozen_pages().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/internal.h
mm/page_alloc.c

index e74441b4cca0be6cbe789e4c9f8fb546cfc6b183..66ea73fbb5b990eeca276fac69be4699f6ac7ccc 100644 (file)
@@ -382,6 +382,8 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
                                        gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
+struct page *__alloc_frozen_pages(gfp_t, unsigned int order, int nid,
+               nodemask_t *);
 void free_frozen_pages(struct page *, unsigned int order);
 void free_unref_page_list(struct list_head *list);
 
index 810990618186df766546becd9bcc1dd32c870a1f..7314679d0b7a36658a50a38c5ff31b936a452d23 100644 (file)
@@ -5513,8 +5513,8 @@ EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
-                                                       nodemask_t *nodemask)
+struct page *__alloc_frozen_pages(gfp_t gfp, unsigned int order,
+               int preferred_nid, nodemask_t *nodemask)
 {
        struct page *page;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -5570,14 +5570,23 @@ out:
                free_frozen_pages(page, order);
                page = NULL;
        }
-       if (page)
-               set_page_refcounted(page);
 
        trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
        kmsan_alloc_page(page, order, alloc_gfp);
 
        return page;
 }
+
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+                                                       nodemask_t *nodemask)
+{
+       struct page *page;
+
+       page = __alloc_frozen_pages(gfp, order, preferred_nid, nodemask);
+       if (page)
+               set_page_refcounted(page);
+       return page;
+}
 EXPORT_SYMBOL(__alloc_pages);
 
 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,