www.infradead.org Git - users/willy/xarray.git/commitdiff
mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway
author Vishal Moola (Oracle) <vishal.moola@gmail.com>
Sat, 14 Sep 2024 19:41:19 +0000 (12:41 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 17 Sep 2024 07:58:04 +0000 (00:58 -0700)
Syzbot reports a UAF in hugetlb_fault().  This happens because
vmf_anon_prepare() could drop the per-VMA lock and allow the current VMA
to be freed before hugetlb_vma_unlock_read() is called.

We can fix this by using a modified version of vmf_anon_prepare() that
doesn't release the VMA lock on failure, and then release it ourselves
after hugetlb_vma_unlock_read().

Link: https://lkml.kernel.org/r/20240914194243.245-2-vishal.moola@gmail.com
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index aaf508be0a2b0401c6056d097ad84dd10123aa0d..9a3a6e2dee97a2ba99f651fff9aa424efb7092ee 100644 (file)
@@ -6048,7 +6048,7 @@ retry_avoidcopy:
         * When the original hugepage is shared one, it does not have
         * anon_vma prepared.
         */
-       ret = vmf_anon_prepare(vmf);
+       ret = __vmf_anon_prepare(vmf);
        if (unlikely(ret))
                goto out_release_all;
 
@@ -6247,7 +6247,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                }
 
                if (!(vma->vm_flags & VM_MAYSHARE)) {
-                       ret = vmf_anon_prepare(vmf);
+                       ret = __vmf_anon_prepare(vmf);
                        if (unlikely(ret))
                                goto out;
                }
@@ -6378,6 +6378,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
        folio_unlock(folio);
 out:
        hugetlb_vma_unlock_read(vma);
+
+       /*
+        * We must check to release the per-VMA lock. __vmf_anon_prepare() is
+        * the only way ret can be set to VM_FAULT_RETRY.
+        */
+       if (unlikely(ret & VM_FAULT_RETRY))
+               vma_end_read(vma);
+
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        return ret;
 
@@ -6599,6 +6607,14 @@ out_ptl:
        }
 out_mutex:
        hugetlb_vma_unlock_read(vma);
+
+       /*
+        * We must check to release the per-VMA lock. __vmf_anon_prepare() in
+        * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+        */
+       if (unlikely(ret & VM_FAULT_RETRY))
+               vma_end_read(vma);
+
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        /*
         * Generally it's safe to hold refcount during waiting page lock. But