mm/page_alloc: Move set_page_refcounted() to end of __alloc_pages()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 14 Jun 2022 20:38:29 +0000 (16:38 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
Remove some code duplication by calling set_page_refcounted() at the
end of __alloc_pages() instead of after each call that can allocate
a page.  This means that if we've exceeded the allowed memcg memory,
we now free a frozen page, i.e. one whose refcount was never raised.
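
The net effect on the allocator's control flow is sketched below.  This
is a minimal userspace model, not the kernel code: alloc_frozen_page(),
charge_page() and free_frozen_page() are hypothetical stand-ins for
get_page_from_freelist(), __memcg_kmem_charge_page() and
free_frozen_pages(), and the refcount field stands in for the real page
reference count.

#include <stdio.h>
#include <stdlib.h>

struct page {
	int refcount;	/* 0 means the page is "frozen" */
};

/* Stand-in for get_page_from_freelist(): returns a frozen page. */
static struct page *alloc_frozen_page(void)
{
	return calloc(1, sizeof(struct page));
}

/* Stand-in for __memcg_kmem_charge_page(): 0 on success. */
static int charge_page(struct page *page)
{
	(void)page;
	return 0;	/* pretend the memcg charge succeeded */
}

/* Stand-in for free_frozen_pages(): the refcount was never raised,
 * so the page is freed directly, with no put_page()-style decrement. */
static void free_frozen_page(struct page *page)
{
	free(page);
}

static struct page *alloc_page_model(void)
{
	struct page *page = alloc_frozen_page();

	if (page && charge_page(page) != 0) {
		/* Charge failed; the page is still frozen, free it as such. */
		free_frozen_page(page);
		page = NULL;
	}
	/* The single place the refcount is set, on every success path. */
	if (page)
		page->refcount = 1;
	return page;
}

int main(void)
{
	struct page *page = alloc_page_model();

	if (page) {
		printf("allocated page, refcount %d\n", page->refcount);
		free(page);	/* model only; the kernel would put_page() */
	}
	return 0;
}

The ordering is the point: the memcg charge is attempted while the page
is still frozen, so the failure path never has to undo a reference, and
only the single success exit raises the refcount.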

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/page_alloc.c

index 6dc40d1e5bbeb15ea3959d7f7d3403b4e1da4e69..810990618186df766546becd9bcc1dd32c870a1f 100644
@@ -5550,10 +5550,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 
        /* First allocation attempt */
        page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
-       if (likely(page)) {
-               set_page_refcounted(page);
+       if (likely(page))
                goto out;
-       }
 
        alloc_gfp = gfp;
        ac.spread_dirty_pages = false;
@@ -5565,15 +5563,15 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
        ac.nodemask = nodemask;
 
        page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
-       if (page)
-               set_page_refcounted(page);
 
 out:
        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
            unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
-               __free_pages(page, order);
+               free_frozen_pages(page, order);
                page = NULL;
        }
+       if (page)
+               set_page_refcounted(page);
 
        trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
        kmsan_alloc_page(page, order, alloc_gfp);