mm/page_alloc: Move set_page_refcounted() to callers of __alloc_pages_may_oom()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Tue, 14 Jun 2022 20:22:55 +0000 (16:22 -0400)
Commit:     Matthew Wilcox (Oracle) <willy@infradead.org>
CommitDate: Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
In preparation for allocating frozen pages, stop initialising the page
refcount in __alloc_pages_may_oom().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
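
With this change, __alloc_pages_may_oom() hands back a page whose refcount has not yet been initialised, and the caller in __alloc_pages_slowpath() performs that step after the oom path returns. For context, the helper being moved is roughly the following (a sketch of set_page_refcounted() as defined in mm/internal.h around this series; consult the tree itself for the authoritative definition):

	/*
	 * Sketch of set_page_refcounted() from mm/internal.h: assert the page
	 * is not a tail page and currently has a zero refcount, then set the
	 * refcount to 1.  Shown for illustration only.
	 */
	static inline void set_page_refcounted(struct page *page)
	{
		VM_BUG_ON_PAGE(PageTail(page), page);
		VM_BUG_ON_PAGE(page_ref_count(page), page);
		set_page_count(page, 1);
	}

Keeping this call out of __alloc_pages_may_oom() means the function can later return frozen (refcount-zero) pages without its callers observing a spurious elevated count.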
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef71500f28bebdd20e318ed780bd9f74ac823663..980ced03977705658b80e7c68ef697b2126999ed 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4414,10 +4414,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
                                      ~__GFP_DIRECT_RECLAIM, order,
                                      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
-       if (page) {
-               set_page_refcounted(page);
+       if (page)
                goto out;
-       }
 
        /* Coredumps can quickly deplete all memory reserves */
        if (current->flags & PF_DUMPCORE)
@@ -4462,8 +4460,6 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                if (gfp_mask & __GFP_NOFAIL)
                        page = __alloc_pages_cpuset_fallback(gfp_mask, order,
                                        ALLOC_NO_WATERMARKS, ac);
-               if (page)
-                       set_page_refcounted(page);
        }
 out:
        mutex_unlock(&oom_lock);
@@ -5236,8 +5232,10 @@ retry:
 
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
-       if (page)
+       if (page) {
+               set_page_refcounted(page);
                goto got_pg;
+       }
 
        /* Avoid allocations with no watermarks from looping endlessly */
        if (tsk_is_oom_victim(current) &&