mm/page_alloc: Move set_page_refcounted() to callers of __alloc_pages_slowpath()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 14 Jun 2022 20:33:11 +0000 (16:33 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
In preparation for allocating frozen pages, stop initialising the page
refcount in __alloc_pages_slowpath().
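
For context, set_page_refcounted() is the small helper that hands the
first reference on a freshly allocated page to the caller.  A rough
sketch of its body (it lives in mm/internal.h around this point in the
tree; check the tree itself for the authoritative version):

	static inline void set_page_refcounted(struct page *page)
	{
		/* Only head pages carry a refcount. */
		VM_BUG_ON_PAGE(PageTail(page), page);
		/* The page must still be frozen, i.e. refcount == 0. */
		VM_BUG_ON_PAGE(page_ref_count(page), page);
		/* Hand out the first and only reference. */
		set_page_count(page, 1);
	}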

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/page_alloc.c

index d31c56f95aa58a857849639f9562ccead8e6615d..6dc40d1e5bbeb15ea3959d7f7d3403b4e1da4e69 100644 (file)
@@ -5084,10 +5084,8 @@ restart:
         * that first
         */
        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-       if (page) {
-               set_page_refcounted(page);
+       if (page)
                goto got_pg;
-       }
 
        /*
         * For costly allocations, try direct compaction first, as it's likely
@@ -5106,10 +5104,8 @@ restart:
                                                alloc_flags, ac,
                                                INIT_COMPACT_PRIORITY,
                                                &compact_result);
-               if (page) {
-                       set_page_refcounted(page);
+               if (page)
                        goto got_pg;
-               }
 
                /*
                 * Checks for costly allocations with __GFP_NORETRY, which
@@ -5169,10 +5165,8 @@ retry:
 
        /* Attempt with potentially adjusted zonelist and alloc_flags */
        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-       if (page) {
-               set_page_refcounted(page);
+       if (page)
                goto got_pg;
-       }
 
        /* Caller is not willing to reclaim, we can't balance anything */
        if (!can_direct_reclaim)
@@ -5185,18 +5179,14 @@ retry:
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
                                                        &did_some_progress);
-       if (page) {
-               set_page_refcounted(page);
+       if (page)
                goto got_pg;
-       }
 
        /* Try direct compaction and then allocating */
        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
                                        compact_priority, &compact_result);
-       if (page) {
-               set_page_refcounted(page);
+       if (page)
                goto got_pg;
-       }
 
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
@@ -5236,10 +5226,8 @@ retry:
 
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
-       if (page) {
-               set_page_refcounted(page);
+       if (page)
                goto got_pg;
-       }
 
        /* Avoid allocations with no watermarks from looping endlessly */
        if (tsk_is_oom_victim(current) &&
@@ -5296,10 +5284,8 @@ nopage:
                 * the situation worse
                 */
                page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
-               if (page) {
-                       set_page_refcounted(page);
+               if (page)
                        goto got_pg;
-               }
 
                cond_resched();
                goto retry;
@@ -5579,6 +5565,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
        ac.nodemask = nodemask;
 
        page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
+       if (page)
+               set_page_refcounted(page);
 
 out:
        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
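
The same two-line epilogue is dropped at seven success points in the
slow path.  Condensed, the change is (a sketch, reusing the labels and
identifiers from page_alloc.c above):

	/* Before: each success point initialised the refcount itself. */
	if (page) {
		set_page_refcounted(page);
		goto got_pg;
	}

	/* After: the slow path returns the page frozen (refcount == 0)... */
	if (page)
		goto got_pg;

	/* ...and __alloc_pages() sets the refcount exactly once. */
	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
	if (page)
		set_page_refcounted(page);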