www.infradead.org Git - users/willy/pagecache.git/commitdiff
khugepaged: Convert hpage_collapse_alloc_page() to hpage_collapse_alloc_folio()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 3 Jan 2023 04:16:40 +0000 (23:16 -0500)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Tue, 3 Jan 2023 04:16:40 +0000 (23:16 -0500)
Return the folio directly instead of a boolean success flag.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
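
The shape of the conversion is easiest to see in isolation. Below is a
minimal, self-contained userspace sketch of the same calling-convention
change; buf_alloc_old(), buf_alloc_new(), count_event() and the event
names are hypothetical stand-ins for hpage_collapse_alloc_page(),
hpage_collapse_alloc_folio(), count_vm_event() and the THP_COLLAPSE_*
counters, not kernel code.

#include <stdio.h>
#include <stdlib.h>

enum event { EV_ALLOC, EV_ALLOC_FAILED };

static void count_event(enum event e)
{
	/* Stand-in for count_vm_event(). */
	printf("event %d\n", (int)e);
}

/* Before: boolean success plus an out-parameter. */
static int buf_alloc_old(char **out, size_t size)
{
	char *buf = malloc(size);

	if (!buf) {
		count_event(EV_ALLOC_FAILED);
		return 0;
	}

	count_event(EV_ALLOC);
	*out = buf;
	return 1;
}

/* After: return the object directly; NULL signals failure. */
static char *buf_alloc_new(size_t size)
{
	char *buf = malloc(size);

	count_event(buf ? EV_ALLOC : EV_ALLOC_FAILED);
	return buf;
}

int main(void)
{
	char *buf;

	/* Old calling convention: test the flag, then use the out-param. */
	if (!buf_alloc_old(&buf, 64))
		return 1;
	free(buf);

	/* New calling convention: the return value is the object. */
	buf = buf_alloc_new(64);
	if (!buf)
		return 1;
	free(buf);
	return 0;
}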
mm/khugepaged.c

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 39c54ea0b6bc06f41ff5e9c0a356baea148fc25c..a1aa06a55466d4b6ce33ec9e9e51e13fbc8f5af1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -796,19 +796,13 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
-                                     nodemask_t *nmask)
+static struct folio *hpage_collapse_alloc_folio(gfp_t gfp, int node,
+               nodemask_t *nmask)
 {
        struct folio *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
 
-       if (unlikely(!folio)) {
-               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-               return false;
-       }
-
-       count_vm_event(THP_COLLAPSE_ALLOC);
-       *hpage = &folio->page;
-       return true;
+       count_vm_event(folio ? THP_COLLAPSE_ALLOC : THP_COLLAPSE_ALLOC_FAILED);
+       return folio;
 }
 
 /*
@@ -951,15 +945,18 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
                              struct collapse_control *cc)
 {
+       struct folio *folio;
        gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
                     GFP_TRANSHUGE);
        int node = hpage_collapse_find_target_node(cc);
 
-       if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+       folio = hpage_collapse_alloc_folio(gfp, node, &cc->alloc_nmask);
+       if (!folio)
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
-       if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
+       *hpage = &folio->page;
+       if (unlikely(mem_cgroup_charge(folio, mm, gfp)))
                return SCAN_CGROUP_CHARGE_FAIL;
-       count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+       count_memcg_page_event(&folio->page, THP_COLLAPSE_ALLOC);
        return SCAN_SUCCEED;
 }
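
Two simplifications fall out of returning the folio: the if/else around
the vm-event accounting collapses into a single count_vm_event() call on
a ternary, and alloc_charge_hpage() can hand the folio straight to
mem_cgroup_charge() instead of reconstructing it with page_folio(*hpage).
The *hpage out-parameter remains for now, presumably because
alloc_charge_hpage()'s callers still operate on struct page.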