www.infradead.org Git - users/dwmw2/linux.git/commitdiff
hugetlb: check for anon_vma prior to folio allocation
author: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Mon, 15 Apr 2024 21:17:47 +0000 (14:17 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 25 Apr 2024 02:34:26 +0000 (19:34 -0700)
Commit 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of
anon_vma_prepare()") may bail out after allocating a folio if we do not
hold the mmap lock.  When this occurs, vmf_anon_prepare() will release the
vma lock.  Hugetlb then attempts to call restore_reserve_on_error(), which
depends on the vma lock being held.

We can move vmf_anon_prepare() prior to the folio allocation in order to
avoid calling restore_reserve_on_error() without the vma lock.

Link: https://lkml.kernel.org/r/ZiFqSrSRLhIV91og@fedora
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+ad1b592fc4483655438b@syzkaller.appspotmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 4553241f0fb28415f3d84107532a9b8b7a5a0024..05371bf54f96d306476bdafe9fa88f2ffb4da785 100644 (file)
@@ -6261,6 +6261,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                                                        VM_UFFD_MISSING);
                }
 
+               if (!(vma->vm_flags & VM_MAYSHARE)) {
+                       ret = vmf_anon_prepare(vmf);
+                       if (unlikely(ret))
+                               goto out;
+               }
+
                folio = alloc_hugetlb_folio(vma, haddr, 0);
                if (IS_ERR(folio)) {
                        /*
@@ -6297,15 +6303,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                                 */
                                restore_reserve_on_error(h, vma, haddr, folio);
                                folio_put(folio);
+                               ret = VM_FAULT_SIGBUS;
                                goto out;
                        }
                        new_pagecache_folio = true;
                } else {
                        folio_lock(folio);
-
-                       ret = vmf_anon_prepare(vmf);
-                       if (unlikely(ret))
-                               goto backout_unlocked;
                        anon_rmap = 1;
                }
        } else {