mm/page_alloc: move set_page_refcounted() to callers of get_page_from_freelist()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 25 Nov 2024 21:01:38 +0000
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Tue, 14 Jan 2025 06:40:32 +0000 (22:40 -0800)
In preparation for allocating frozen pages, stop initialising the page
refcount in get_page_from_freelist().

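In effect, the calling convention changes: get_page_from_freelist() now
returns the page with its refcount still frozen at zero, and each caller
that publishes the page must raise the refcount itself.  A minimal sketch
of the before/after pattern (illustrative only, not part of the patch):

	/* Before: refcount was set inside get_page_from_freelist(). */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	/* After: callers that hand the page out set the refcount. */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		set_page_refcounted(page);
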
Link: https://lkml.kernel.org/r/20241125210149.2976098-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08cc1e0bd95030afb008e0546611a3fa88ef3a20..2786117a50ee64ad61944cd6eab8764214ef960c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3473,7 +3473,6 @@ try_this_zone:
                                gfp_mask, alloc_flags, ac->migratetype);
                if (page) {
                        prep_new_page(page, order, gfp_mask, alloc_flags);
-                       set_page_refcounted(page);
 
                        /*
                         * If this is a high-order atomic allocation then check
@@ -3568,6 +3567,8 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
                page = get_page_from_freelist(gfp_mask, order,
                                alloc_flags, ac);
 
+       if (page)
+               set_page_refcounted(page);
        return page;
 }
 
@@ -3606,8 +3607,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
                                      ~__GFP_DIRECT_RECLAIM, order,
                                      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
-       if (page)
+       if (page) {
+               set_page_refcounted(page);
                goto out;
+       }
 
        /* Coredumps can quickly deplete all memory reserves */
        if (current->flags & PF_DUMPCORE)
@@ -3698,10 +3701,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        count_vm_event(COMPACTSTALL);
 
        /* Prep a captured page if available */
-       if (page) {
+       if (page)
                prep_new_page(page, order, gfp_mask, alloc_flags);
-               set_page_refcounted(page);
-       }
 
        /* Try get a page from the freelist if available */
        if (!page)
@@ -3710,6 +3711,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        if (page) {
                struct zone *zone = page_zone(page);
 
+               set_page_refcounted(page);
                zone->compact_blockskip_flush = false;
                compaction_defer_reset(zone, order, true);
                count_vm_event(COMPACTSUCCESS);
@@ -3968,6 +3970,7 @@ retry:
                drained = true;
                goto retry;
        }
+       set_page_refcounted(page);
 out:
        psi_memstall_leave(&pflags);
 
@@ -4288,8 +4291,10 @@ restart:
         * that first
         */
        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-       if (page)
+       if (page) {
+               set_page_refcounted(page);
                goto got_pg;
+       }
 
        /*
         * For costly allocations, try direct compaction first, as it's likely
@@ -4369,8 +4374,10 @@ retry:
 
        /* Attempt with potentially adjusted zonelist and alloc_flags */
        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-       if (page)
+       if (page) {
+               set_page_refcounted(page);
                goto got_pg;
+       }
 
        /* Caller is not willing to reclaim, we can't balance anything */
        if (!can_direct_reclaim)
@@ -4754,8 +4761,10 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 
        /* First allocation attempt */
        page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
-       if (likely(page))
+       if (likely(page)) {
+               set_page_refcounted(page);
                goto out;
+       }
 
        alloc_gfp = gfp;
        ac.spread_dirty_pages = false;
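
With set_page_refcounted() pushed out to the callers above, a later
frozen-page allocation path can reuse the same code without the refcount
ever being raised.  A hypothetical sketch of such a caller (the shape is
assumed from the commit message, not taken from this patch):

	/* Hypothetical frozen-page caller: the page is returned with a
	 * zero refcount and is never published, so set_page_refcounted()
	 * is deliberately skipped. */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		prep_new_page(page, order, gfp_mask, alloc_flags);
	return page;	/* refcount stays frozen at zero */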