}
#endif
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
-				      nodemask_t *nmask)
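+/* Allocate a PMD-order folio for collapse; returns NULL on failure. */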
+static struct folio *hpage_collapse_alloc_folio(gfp_t gfp, int node,
+						nodemask_t *nmask)
{
	struct folio *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
-	if (unlikely(!folio)) {
-		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-		return false;
-	}

-	count_vm_event(THP_COLLAPSE_ALLOC);
-	*hpage = &folio->page;
-	return true;
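+	/* A NULL folio means the allocation failed; count either outcome. */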
+	count_vm_event(folio ? THP_COLLAPSE_ALLOC : THP_COLLAPSE_ALLOC_FAILED);
+	return folio;
}
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
+ struct folio *folio;
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
GFP_TRANSHUGE);
int node = hpage_collapse_find_target_node(cc);
- if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+ folio = hpage_collapse_alloc_folio(gfp, node, &cc->alloc_nmask);
+ if (!folio)
return SCAN_ALLOC_HUGE_PAGE_FAIL;
-	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
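+	/* Callers still expect a struct page; hand back the folio's head page. */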
+	*hpage = &folio->page;
+	if (unlikely(mem_cgroup_charge(folio, mm, gfp)))
		return SCAN_CGROUP_CHARGE_FAIL;
-	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
	return SCAN_SUCCEED;
}