@@ ... @@ get_page_from_freelist
page = rmqueue(ac->preferred_zone, zone, order,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
- set_page_refcounted(page);
/*
* If this is a high-order atomic allocation then check
* if the pageblock should be reserved for the future
*/
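For context, the helper being hoisted out of get_page_from_freelist() and into its callers is a thin wrapper over set_page_count(); its definition (mm/internal.h, as of this series) is:

static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

The two assertions are why every caller below may only invoke it on a page it has just allocated and not yet published, and never on NULL.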
@@ ... @@ __alloc_pages_cpuset_fallback
if (!page)
page = get_page_from_freelist(gfp_mask, order,
alloc_flags, ac);
+ if (page)
+ set_page_refcounted(page);
return page;
}
@@ ... @@ __alloc_pages_may_oom
page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
~__GFP_DIRECT_RECLAIM, order,
ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
- if (page)
+ if (page) {
+ set_page_refcounted(page);
goto out;
+ }
/* Coredumps can quickly deplete all memory reserves */
if (current->flags & PF_DUMPCORE)
goto out;
@@ ... @@ __alloc_pages_direct_compact
count_vm_event(COMPACTSTALL);
/* Prep a captured page if available */
- if (page) {
+ if (page)
prep_new_page(page, order, gfp_mask, alloc_flags);
- set_page_refcounted(page);
- }
/* Try get a page from the freelist if available */
if (!page)
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
if (page) {
struct zone *zone = page_zone(page);
+ set_page_refcounted(page);
zone->compact_blockskip_flush = false;
compaction_defer_reset(zone, order, true);
count_vm_event(COMPACTSUCCESS);
@@ ... @@ __alloc_pages_direct_reclaim
drained = true;
goto retry;
}
+ if (page)
+ set_page_refcounted(page);
out:
psi_memstall_leave(&pflags);
@@ ... @@ __alloc_pages_slowpath
/*
* The adjusted alloc_flags might result in immediate success, so try
* that first
*/
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
- if (page)
+ if (page) {
+ set_page_refcounted(page);
goto got_pg;
+ }
/*
* For costly allocations, try direct compaction first, as it's likely
@@ ... @@ __alloc_pages_slowpath
/* Attempt with potentially adjusted zonelist and alloc_flags */
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
- if (page)
+ if (page) {
+ set_page_refcounted(page);
goto got_pg;
+ }
/* Caller is not willing to reclaim, we can't balance anything */
if (!can_direct_reclaim)
@@ ... @@ __alloc_pages_noprof
/* First allocation attempt */
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
- if (likely(page))
+ if (likely(page)) {
+ set_page_refcounted(page);
goto out;
+ }
alloc_gfp = gfp;
ac.spread_dirty_pages = false;
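The shape of the whole change can be modeled outside the kernel in a few lines of plain C. The sketch below is illustrative only: struct page, get_page_from_freelist() and set_page_refcounted() are simplified stand-ins for the kernel symbols of the same names, keeping just the refcount behaviour the hunks above enforce, namely that every success path sets the refcount exactly once and never passes NULL.

#include <assert.h>
#include <stddef.h>

struct page { int refcount; };

static struct page pool[1];
static int used;

/* Stand-in for the callee: hands out a page with its refcount still zero. */
static struct page *get_page_from_freelist(void)
{
	if (used)
		return NULL;
	used = 1;
	pool[0].refcount = 0;
	return &pool[0];
}

/* Stand-in for set_page_refcounted(): turn a non-refcounted page into one
 * with a count of one, at the moment the caller publishes it. */
static void set_page_refcounted(struct page *page)
{
	assert(page != NULL);		/* callers must test for NULL first */
	assert(page->refcount == 0);	/* models VM_BUG_ON_PAGE(page_ref_count(page), page) */
	page->refcount = 1;
}

int main(void)
{
	struct page *page = get_page_from_freelist();

	/* The pattern each hunk above installs in a caller: */
	if (page)
		set_page_refcounted(page);

	assert(page == NULL || page->refcount == 1);
	return 0;
}

Note how this model exposes the same mistake the __alloc_pages_direct_reclaim hunk guards against: dropping the if (page) test would hand NULL to set_page_refcounted() on the failed-reclaim path.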