mm/hugetlb: drop vma_has_reserves()
author Peter Xu <peterx@redhat.com>
Tue, 7 Jan 2025 20:40:01 +0000 (15:40 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 26 Jan 2025 04:22:31 +0000 (20:22 -0800)
After the previous cleanup, vma_has_reserves() is mostly an empty helper,
except that it expresses "use an existing reserve" as the inverse of "needs
a global reserve count", which is still true.

To avoid confusion over having two inverted ways to ask the same question,
always use gbl_chg everywhere and drop the function.

While at it, rename "chg" to "gbl_chg" in dequeue_hugetlb_folio_vma(), which
may help readers see that the "chg" here is the global reserve count, not
the vma resv count.

Link: https://lkml.kernel.org/r/20250107204002.2683356-7-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Ackerley Tng <ackerleytng@google.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
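
For context, a minimal sketch (not the kernel source itself) of the inversion
being dropped: the helper only negated gbl_chg, the global reserve count its
callers already computed, so the callers can test gbl_chg directly.

/*
 * Sketch of the dropped helper: gbl_chg == 0 means an existing
 * reservation already covers this allocation, while gbl_chg == 1 means
 * a new page must come out of the free pool.
 */
static bool vma_has_reserves(long gbl_chg)
{
	return gbl_chg == 0;	/* "has a reserve" == "no global charge needed" */
}

/* Before: two inverted spellings of the same question.         */
/*	if (!vma_has_reserves(chg) && !available_huge_pages(h)) */
/*	if (folio && vma_has_reserves(chg))                      */

/* After: one spelling, gbl_chg tested directly.                 */
/*	if (gbl_chg && !available_huge_pages(h))                 */
/*	if (folio && !gbl_chg)                                    */
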
mm/hugetlb.c

index efd8e9f9bf0ea71d51f75d2f68bce5a972725ecd..8e46798a9dfc2f1a498d7cf2cebd1b29038f4e7f 100644
@@ -1247,16 +1247,6 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
        hugetlb_dup_vma_private(vma);
 }
 
-/* Returns true if the VMA has associated reserve pages */
-static bool vma_has_reserves(long chg)
-{
-       /*
-        * Now "chg" has all the conditions considered for whether we
-        * should use an existing reservation.
-        */
-       return chg == 0;
-}
-
 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
        int nid = folio_nid(folio);
@@ -1345,7 +1335,7 @@ static unsigned long available_huge_pages(struct hstate *h)
 
 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
                                struct vm_area_struct *vma,
-                               unsigned long address, long chg)
+                               unsigned long address, long gbl_chg)
 {
        struct folio *folio = NULL;
        struct mempolicy *mpol;
@@ -1354,11 +1344,10 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
        int nid;
 
        /*
-        * A child process with MAP_PRIVATE mappings created by their parent
-        * have no page reserves. This check ensures that reservations are
-        * not "stolen". The child may still get SIGKILLed
+        * gbl_chg==1 means the allocation requires a new page that was not
+        * reserved before.  Making sure there's at least one free page.
         */
-       if (!vma_has_reserves(chg) && !available_huge_pages(h))
+       if (gbl_chg && !available_huge_pages(h))
                goto err;
 
        gfp_mask = htlb_alloc_mask(h);
@@ -1376,7 +1365,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
                folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
                                                        nid, nodemask);
 
-       if (folio && vma_has_reserves(chg)) {
+       if (folio && !gbl_chg) {
                folio_set_hugetlb_restore_reserve(folio);
                h->resv_huge_pages--;
        }
@@ -3067,7 +3056,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                if (!folio)
                        goto out_uncharge_cgroup;
                spin_lock_irq(&hugetlb_lock);
-               if (vma_has_reserves(gbl_chg)) {
+               if (!gbl_chg) {
                        folio_set_hugetlb_restore_reserve(folio);
                        h->resv_huge_pages--;
                }