static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
                                   int drain) { }
#endif
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
+static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
/*
 * Try to allocate a partial slab from a specific node.
 */
        list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
                void *t;
-               if (!pfmemalloc_match(slab_page(slab), gfpflags))
+               if (!pfmemalloc_match(slab, gfpflags))
                        continue;
                t = acquire_slab(s, n, slab, object == NULL, &objects);
#endif
}
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
{
-       if (unlikely(PageSlabPfmemalloc(page)))
+       if (unlikely(slab_test_pfmemalloc(slab)))
                return gfp_pfmemalloc_allowed(gfpflags);
        return true;
}
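
The gate itself is small: a slab that was allocated from pfmemalloc (emergency)
reserves may only serve callers that are themselves entitled to those reserves;
any other slab matches unconditionally. A minimal user-space sketch of that
policy, using mocked-up types and names (nothing below is kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel objects in the hunks above. */
struct mock_slab { bool pfmemalloc; };          /* backed by emergency reserves? */
#define MOCK_GFP_MEMALLOC 0x1                   /* caller may dip into reserves  */

static bool mock_pfmemalloc_match(const struct mock_slab *slab, unsigned int gfp)
{
        if (slab->pfmemalloc)                   /* reserve-backed slab ...       */
                return gfp & MOCK_GFP_MEMALLOC; /* ... only for entitled callers */
        return true;                            /* ordinary slabs match everyone */
}

int main(void)
{
        struct mock_slab reserve = { .pfmemalloc = true };
        struct mock_slab normal  = { .pfmemalloc = false };

        printf("%d %d %d\n",
               mock_pfmemalloc_match(&reserve, 0),                 /* 0: skip it */
               mock_pfmemalloc_match(&reserve, MOCK_GFP_MEMALLOC), /* 1: allowed */
               mock_pfmemalloc_match(&normal, 0));                 /* 1: always  */
        return 0;
}

The partial-list scan above applies the same predicate: a reserve-backed slab
that does not match the caller's gfp flags is simply skipped via continue.
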
}
-       if (unlikely(!pfmemalloc_match(slab_page(slab), gfpflags)))
+       if (unlikely(!pfmemalloc_match(slab, gfpflags)))
                /*
                 * For !pfmemalloc_match() case we don't load freelist so that
                 * we don't make further mismatched allocations easier.
                 */
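
The comment states the other half of the policy: the caller that ended up with
a mismatched slab is still served its object, but the slab's freelist is not
loaded into the per-CPU cache, so later fast-path allocations cannot keep
drawing reserve-backed objects. A hedged sketch of that decision, again with
mock names rather than kernel ones:

#include <stdbool.h>
#include <stdio.h>

/* Mock types; none of these names are kernel API. */
struct mock_slab { bool pfmemalloc; int free_objects; };

static struct mock_slab *cpu_slab;      /* stand-in for the per-CPU fast path */

static bool mock_pfmemalloc_match(const struct mock_slab *slab, bool may_use_reserves)
{
        return !slab->pfmemalloc || may_use_reserves;
}

/*
 * Hand one object from a freshly acquired slab to the caller.  Only a
 * matching slab becomes cpu_slab ("load freelist"); a mismatched one is
 * set aside, so the next allocation has to repeat the check.
 */
static int mock_alloc_from_new_slab(struct mock_slab *slab, bool may_use_reserves)
{
        slab->free_objects--;                   /* the caller's one object */
        if (mock_pfmemalloc_match(slab, may_use_reserves))
                cpu_slab = slab;                /* future fast-path hits   */
        return 1;
}

int main(void)
{
        struct mock_slab reserve = { .pfmemalloc = true, .free_objects = 8 };

        mock_alloc_from_new_slab(&reserve, false);
        printf("cpu_slab loaded: %s\n", cpu_slab ? "yes" : "no");   /* no */
        return 0;
}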