mm-hugetlb-add-support-for-mempolicy-mpol_preferred_many-fix
author     Ben Widawsky <ben.widawsky@intel.com>
           Mon, 23 Aug 2021 23:59:35 +0000 (09:59 +1000)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Wed, 25 Aug 2021 23:34:15 +0000 (09:34 +1000)
add mpol_is_preferred_many() helpers so the MPOL_PREFERRED_MANY checks in mm/hugetlb.c no longer need CONFIG_NUMA ifdefs
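
For illustration, a minimal sketch of the pattern, assuming the helper and
dequeue_huge_page_nodemask() behave as in this patch; the caller
example_dequeue() is hypothetical:

  #ifdef CONFIG_NUMA
  static inline bool mpol_is_preferred_many(struct mempolicy *pol)
  {
          return pol->mode == MPOL_PREFERRED_MANY;
  }
  #else
  /* !CONFIG_NUMA stub: MPOL_PREFERRED_MANY can never be set */
  static inline bool mpol_is_preferred_many(struct mempolicy *pol)
  {
          return false;
  }
  #endif

  /* hypothetical caller: builds the same with or without CONFIG_NUMA */
  static struct page *example_dequeue(struct hstate *h, struct mempolicy *mpol,
                                      gfp_t gfp_mask, int nid,
                                      nodemask_t *nodemask)
  {
          struct page *page = NULL;

          if (mpol_is_preferred_many(mpol)) {
                  /* try only the preferred nodes first */
                  page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
                  /* fall back to all nodes if that failed */
                  nodemask = NULL;
          }
          if (!page)
                  page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

          return page;
  }

Since the stub always returns false, the compiler can drop the preferred-many
branch entirely on !CONFIG_NUMA builds, which the removed ifdef/goto blocks
previously achieved by hand.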

Link: https://lore.kernel.org/r/20200630212517.308045-12-ben.widawsky@intel.com
Link: https://lkml.kernel.org/r/1627970362-61305-4-git-send-email-feng.tang@intel.com
Link: https://lkml.kernel.org/r/20210809024430.GA46432@shbuild999.sh.intel.com
Signed-off-by: Feng Tang <feng.tang@intel.com>
Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Co-developed-by: Feng Tang <feng.tang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
include/linux/mempolicy.h
mm/hugetlb.c

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0117e1ec7b1e1c73edb2e9bd91d32c41e11cebb8..60d5e6c3340c14da1cb321601e6a5cc3abca1508 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -187,6 +187,12 @@ extern void mpol_put_task_policy(struct task_struct *);
 
 extern bool numa_demotion_enabled;
 
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+       return (pol->mode == MPOL_PREFERRED_MANY);
+}
+
+
 #else
 
 struct mempolicy {};
@@ -297,5 +303,11 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
 }
 
 #define numa_demotion_enabled  false
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+       return  false;
+}
+
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2b1732ad88f71806bcd45fa233f9bb1ece5a67a7..371fec524ca0dc58c39ae45cbcd9b37f9d43bce4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1145,7 +1145,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
                                unsigned long address, int avoid_reserve,
                                long chg)
 {
-       struct page *page;
+       struct page *page = NULL;
        struct mempolicy *mpol;
        gfp_t gfp_mask;
        nodemask_t *nodemask;
@@ -1166,20 +1166,17 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
        gfp_mask = htlb_alloc_mask(h);
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
-#ifdef CONFIG_NUMA
-       if (mpol->mode == MPOL_PREFERRED_MANY) {
+
+       if (mpol_is_preferred_many(mpol)) {
                page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
-               if (page)
-                       goto check_reserve;
-               /* Fallback to all nodes */
+
+               /* Fallback to all nodes if page==NULL */
                nodemask = NULL;
        }
-#endif
-       page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
 
-#ifdef CONFIG_NUMA
-check_reserve:
-#endif
+       if (!page)
+               page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
                SetHPageRestoreReserve(page);
                h->resv_huge_pages--;
@@ -2162,24 +2159,19 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
        nodemask_t *nodemask;
 
        nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
-#ifdef CONFIG_NUMA
-       if (mpol->mode == MPOL_PREFERRED_MANY) {
+       if (mpol_is_preferred_many(mpol)) {
                gfp_t gfp = gfp_mask | __GFP_NOWARN;
 
                gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
                page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
-               if (page) {
-                       mpol_cond_put(mpol);
-                       return page;
-               }
 
-               /* Fallback to all nodes */
+               /* Fallback to all nodes if page==NULL */
                nodemask = NULL;
        }
-#endif
-       page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
-       mpol_cond_put(mpol);
 
+       if (!page)
+               page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
+       mpol_cond_put(mpol);
        return page;
 }