From: Ben Widawsky
Date: Mon, 23 Aug 2021 23:59:35 +0000 (+1000)
Subject: mm-hugetlb-add-support-for-mempolicy-mpol_preferred_many-fix
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=9a994fbdc0a516ffbef5a26edaaae7f9d31071a7;p=users%2Fjedix%2Flinux-maple.git

mm-hugetlb-add-support-for-mempolicy-mpol_preferred_many-fix

add helpers to avoid ifdefs

Link: https://lore.kernel.org/r/20200630212517.308045-12-ben.widawsky@intel.com
Link: https://lkml.kernel.org/r/1627970362-61305-4-git-send-email-feng.tang@intel.com
Link: https://lkml.kernel.org/r/20210809024430.GA46432@shbuild999.sh.intel.com
Signed-off-by: Feng Tang
Signed-off-by: Ben Widawsky
Suggested-by: Michal Hocko
Co-developed-by: Feng Tang
Signed-off-by: Andrew Morton
Signed-off-by: Stephen Rothwell
---

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0117e1ec7b1e..60d5e6c3340c 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -187,6 +187,12 @@ extern void mpol_put_task_policy(struct task_struct *);
 
 extern bool numa_demotion_enabled;
 
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+	return (pol->mode == MPOL_PREFERRED_MANY);
+}
+
+
 #else
 
 struct mempolicy {};
@@ -297,5 +303,11 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
 }
 
 #define numa_demotion_enabled	false
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+	return false;
+}
+
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2b1732ad88f7..371fec524ca0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1145,7 +1145,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 		unsigned long address, int avoid_reserve,
 		long chg)
 {
-	struct page *page;
+	struct page *page = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
@@ -1166,20 +1166,17 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
 	gfp_mask = htlb_alloc_mask(h);
 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
-#ifdef CONFIG_NUMA
-	if (mpol->mode == MPOL_PREFERRED_MANY) {
+
+	if (mpol_is_preferred_many(mpol)) {
 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
-		if (page)
-			goto check_reserve;
-		/* Fallback to all nodes */
+
+		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
-#endif
-	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
-#ifdef CONFIG_NUMA
-check_reserve:
-#endif
+	if (!page)
+		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+
 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
 		SetHPageRestoreReserve(page);
 		h->resv_huge_pages--;
@@ -2162,24 +2159,19 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 	nodemask_t *nodemask;
 
 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
-#ifdef CONFIG_NUMA
-	if (mpol->mode == MPOL_PREFERRED_MANY) {
+	if (mpol_is_preferred_many(mpol)) {
 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
 
 		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
-		if (page) {
-			mpol_cond_put(mpol);
-			return page;
-		}
-		/* Fallback to all nodes */
+		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
-#endif
-	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
-	mpol_cond_put(mpol);
+	if (!page)
+		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
+	mpol_cond_put(mpol);
 	return page;
 }
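
For readers outside the kernel tree, the "helper instead of #ifdef" pattern the fix
introduces can be reduced to a small standalone sketch. This is an illustration only,
not the kernel sources: struct mempolicy is stubbed, the MPOL_PREFERRED_MANY value and
the dequeue_example() caller are made up for the example, and the real definitions live
in include/linux/mempolicy.h. Build with -DCONFIG_NUMA to exercise the NUMA variant.

/*
 * Sketch of providing one helper per CONFIG_NUMA setting so that callers
 * need no #ifdef blocks of their own. Stub types/values for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define MPOL_PREFERRED_MANY	5	/* stub value, not the kernel's enum */

struct mempolicy {
	unsigned short mode;		/* stub of the kernel's struct mempolicy */
};

#ifdef CONFIG_NUMA
/* NUMA build: inspect the policy mode. */
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return (pol->mode == MPOL_PREFERRED_MANY);
}
#else
/* !NUMA build: a policy can never be MPOL_PREFERRED_MANY. */
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}
#endif

/* Caller stays free of #ifdef CONFIG_NUMA either way. */
static void dequeue_example(struct mempolicy *mpol)
{
	if (mpol_is_preferred_many(mpol))
		printf("try the preferred nodes first, then fall back\n");
	else
		printf("use the default node/nodemask path\n");
}

int main(void)
{
	struct mempolicy pol = { .mode = MPOL_PREFERRED_MANY };

	dequeue_example(&pol);
	return 0;
}

Keeping both helper definitions side by side in the header is what lets the patch above
delete the inline #ifdef/#endif pairs and the check_reserve label from
dequeue_huge_page_vma() and alloc_buddy_huge_page_with_mpol(): the callers compile
unchanged whether or not CONFIG_NUMA is set.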