mm/hugetlb: add support for mempolicy MPOL_PREFERRED_MANY
author Ben Widawsky <ben.widawsky@intel.com>
Mon, 23 Aug 2021 23:59:35 +0000 (09:59 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 25 Aug 2021 23:34:15 +0000 (09:34 +1000)
Implement the missing huge page allocation functionality while obeying the
preferred node semantics.  This is similar to the general page allocation
path: a fallback mechanism tries the preferred nodes first and then all
other nodes.
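
The new path is reached when a task that has installed an MPOL_PREFERRED_MANY
policy faults a hugetlb mapping.  As a rough userspace illustration (not part
of this patch; the raw set_mempolicy syscall and the fallback #define of
MPOL_PREFERRED_MANY to 5, its uapi value, are assumptions for toolchains whose
headers do not yet export it, and huge pages are assumed to have been reserved
via vm.nr_hugepages):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MPOL_PREFERRED_MANY
#define MPOL_PREFERRED_MANY 5	/* assumed uapi value; check linux/mempolicy.h */
#endif

int main(void)
{
	unsigned long nodemask = 0x3;	/* prefer nodes 0 and 1 */
	size_t len = 2UL << 20;		/* one 2MB huge page */
	char *p;

	/* Prefer nodes 0-1 for this task's allocations, all others as fallback */
	if (syscall(SYS_set_mempolicy, MPOL_PREFERRED_MANY, &nodemask,
		    8 * sizeof(nodemask) + 1)) {
		perror("set_mempolicy(MPOL_PREFERRED_MANY)");
		return 1;
	}

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}

	memset(p, 0, len);	/* the fault here goes through dequeue_huge_page_vma() */
	munmap(p, len);
	return 0;
}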

To avoid adding too many "#ifdef CONFIG_NUMA" checks, add a helper function
in mempolicy.h to check whether a mempolicy is MPOL_PREFERRED_MANY.
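
The helper itself belongs in include/linux/mempolicy.h and is not part of the
mm/hugetlb.c hunks shown below; a minimal sketch of its shape, assuming the
name mpol_is_preferred_many() (the hunks below still open-code the check under
#ifdef CONFIG_NUMA), is:

#ifdef CONFIG_NUMA
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return pol->mode == MPOL_PREFERRED_MANY;
}
#else
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}
#endif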

[akpm@linux-foundation.org: fix compiling issue when merging with other hugetlb patch]
[Thanks to 0day bot for catching the !CONFIG_NUMA compiling issue]
[mhocko@suse.com: suggest to remove the #ifdef CONFIG_NUMA check]
Link: https://lore.kernel.org/r/20200630212517.308045-12-ben.widawsky@intel.com
Link: https://lkml.kernel.org/r/1627970362-61305-4-git-send-email-feng.tang@intel.com
Link: https://lkml.kernel.org/r/20210809024430.GA46432@shbuild999.sh.intel.com
Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Co-developed-by: Feng Tang <feng.tang@intel.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/hugetlb.c

index dd1c1e7d970b767b395d6f9054f9feb9b04e2242..2b1732ad88f71806bcd45fa233f9bb1ece5a67a7 100644 (file)
@@ -1166,7 +1166,20 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
        gfp_mask = htlb_alloc_mask(h);
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+#ifdef CONFIG_NUMA
+       if (mpol->mode == MPOL_PREFERRED_MANY) {
+               page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+               if (page)
+                       goto check_reserve;
+               /* Fallback to all nodes */
+               nodemask = NULL;
+       }
+#endif
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+
+#ifdef CONFIG_NUMA
+check_reserve:
+#endif
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
                SetHPageRestoreReserve(page);
                h->resv_huge_pages--;
@@ -2149,6 +2162,21 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
        nodemask_t *nodemask;
 
        nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
+#ifdef CONFIG_NUMA
+       if (mpol->mode == MPOL_PREFERRED_MANY) {
+               gfp_t gfp = gfp_mask | __GFP_NOWARN;
+
+               gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
+               page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
+               if (page) {
+                       mpol_cond_put(mpol);
+                       return page;
+               }
+
+               /* Fallback to all nodes */
+               nodemask = NULL;
+       }
+#endif
        page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
        mpol_cond_put(mpol);
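
For reference, once a helper such as the mpol_is_preferred_many() sketched
above is available, the CONFIG_NUMA guards and the check_reserve label in the
first hunk can be dropped.  A sketch of how that path could then read
(assuming page is initialized to NULL earlier in the function):

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		/* Try the preferred nodes first ... */
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

		/* ... and fall back to all nodes if that failed */
		nodemask = NULL;
	}

	if (!page)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}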