mm/page_alloc: Move set_page_refcounted() to callers of prep_new_page()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 14 Jun 2022 20:05:45 +0000 (16:05 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
In preparation for allocating frozen pages, stop initialising the page
refcount in prep_new_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
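
The change is a calling-convention shift: prep_new_page() now leaves the page
refcount at zero (frozen), and each caller that hands the page out becomes
responsible for raising it with set_page_refcounted(), as the hunks below show.
A minimal userspace sketch of the same split (hypothetical types and helper
names, not the kernel implementation):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct page; only the refcount matters here. */
    struct fake_page {
            int refcount;
    };

    /* Analogue of prep_new_page() after this patch: all the usual
     * initialisation happens here, but the refcount is left untouched,
     * so a freshly prepped page stays frozen (refcount zero). */
    static void prep(struct fake_page *page)
    {
            (void)page;     /* flags, pfmemalloc state, etc. would be set here */
    }

    /* Analogue of set_page_refcounted(): the caller takes the first reference. */
    static void set_refcounted(struct fake_page *page)
    {
            page->refcount = 1;
    }

    int main(void)
    {
            struct fake_page *page = calloc(1, sizeof(*page));

            if (!page)
                    return 1;

            prep(page);             /* prepare the page...                    */
            set_refcounted(page);   /* ...then explicitly make it refcounted  */
            printf("refcount = %d\n", page->refcount);

            free(page);
            return 0;
    }

Splitting the two steps is what later allows a frozen-page allocation path to
call prep_new_page() alone and never take the reference at all.
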
mm/page_alloc.c

index 8566391db0f4430eef48e7b81e6d5818d7c3b50e..376b55e10efc474504f31e6de21cdd04da6cfb4a 100644
@@ -2537,7 +2537,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                set_page_pfmemalloc(page);
        else
                clear_page_pfmemalloc(page);
-       set_page_refcounted(page);
 }
 
 /*
@@ -4284,6 +4283,7 @@ try_this_zone:
                                gfp_mask, alloc_flags, ac->migratetype);
                if (page) {
                        prep_new_page(page, order, gfp_mask, alloc_flags);
+                       set_page_refcounted(page);
 
                        /*
                         * If this is a high-order atomic allocation then check
@@ -4507,8 +4507,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        count_vm_event(COMPACTSTALL);
 
        /* Prep a captured page if available */
-       if (page)
+       if (page) {
                prep_new_page(page, order, gfp_mask, alloc_flags);
+               set_page_refcounted(page);
+       }
 
        /* Try get a page from the freelist if available */
        if (!page)
@@ -5477,6 +5479,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                nr_account++;
 
                prep_new_page(page, 0, gfp, 0);
+               set_page_refcounted(page);
                if (page_list)
                        list_add(&page->lru, page_list);
                else