mm/mmap: Fix use-after-free when expanding the VMA
author		Liam R. Howlett <Liam.Howlett@oracle.com>
		Wed, 14 Jun 2023 22:30:35 +0000 (18:30 -0400)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Thu, 15 Jun 2023 20:04:37 +0000 (16:04 -0400)
Expanding the VMA can cause maple tree nodes to be replaced, and the
old nodes freed, if a gap disappears or if the VMA expands beyond the
node boundary into the preceding node (GROWSUP only).  Since the
expansion takes only the mmap read lock, and readers of the tree do
not take the RCU read lock, other walkers may still hold a pointer to
a freed node.
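
Roughly, the race looks like this (illustrative only; the exact call
paths vary by architecture and config):

    reader (read lock, no RCU)      writer (read lock)
    --------------------------      ------------------
    find_vma(mm, addr)
      starts walking a node
                                    expand_stack()
                                      rewrites the tree; replaced
                                      nodes are freed without waiting
                                      for an RCU grace period
      dereferences the now-freed
      node -> use-after-free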

Fix the use-after-free by returning -EAGAIN from the expansion when
only the read lock is held; callers then drop the read lock, obtain
the write lock, and retry.  In the GROWSUP case, ensure the write lock
is always taken.
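
The caller-side pattern, distilled from the hunks below (a sketch
only; the names match the patched code):

	err = expand_stack(vma, address, false);
	if (unlikely(err == -EAGAIN)) {
		/* The tree must be modified: upgrade to the write lock. */
		mmap_read_unlock(mm);
		mmap_write_lock(mm);
		/* The VMA may have changed while the lock was dropped. */
		vma = find_vma(mm, address);
		if (vma)
			err = expand_stack(vma, address, true);
		mmap_write_downgrade(mm);
	}
	/* A non-zero err is then handled as a normal failure. */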

Fixes: d4af56c5c7c6 ("mm: start tracking VMAs with maple tree")
Cc: stable@vger.kernel.org
Reported-and-tested-by: syzbot+f3728183b2f78d0d40f6@syzkaller.appspotmail.com
Reported-by: Ruihan Li <lrh2000@pku.edu.cn>
Co-developed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
arch/x86/mm/fault.c
fs/binfmt_elf.c
fs/exec.c
include/linux/mm.h
mm/mmap.c
mm/nommu.c

index e4399983c50c05aa1e2906ce9df15874304f9b31..af686f6b35aaa66e11ddf8047588e681d5a2511c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1237,6 +1237,7 @@ void do_user_addr_fault(struct pt_regs *regs,
        struct mm_struct *mm;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;
+       int err;
 
        tsk = current;
        mm = tsk->mm;
@@ -1405,11 +1406,16 @@ retry:
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, error_code, address);
-               return;
+       err = expand_stack(vma, address, false);
+       if (unlikely(err == -EAGAIN)) {
+               mmap_read_unlock(mm);
+               mmap_write_lock(mm);
+               vma = find_vma(mm, address);
+               if (vma)
+                       err = expand_stack(vma, address, true);
+               mmap_write_downgrade(mm);
        }
-       if (unlikely(expand_stack(vma, address))) {
+       if (unlikely(err)) {
                bad_area(regs, error_code, address);
                return;
        }
index 1033fbdfdbec73da272284c73bbe9d3e28a0a440..869c3aa0e45587a9aa3cad30641e68c2f5cb21ef 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -320,10 +320,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
         * Grow the stack manually; some architectures have a limit on how
         * far ahead a user-space access may be in order to grow the stack.
         */
-       if (mmap_read_lock_killable(mm))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
-       vma = find_extend_vma(mm, bprm->p);
-       mmap_read_unlock(mm);
+       vma = find_extend_vma_locked(mm, bprm->p, true);
+       mmap_write_unlock(mm);
        if (!vma)
                return -EFAULT;
 
index a466e797c8e2e67577be128e1e95775b00970d95..9157b9c256318c1c0ff8453301b739f7a75edd36 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -205,9 +205,11 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
        if (write) {
-               ret = expand_downwards(bprm->vma, pos);
+               mmap_write_lock(bprm->mm);
+               ret = expand_downwards(bprm->vma, pos, true);
+               mmap_write_unlock(bprm->mm);
                if (ret < 0)
                        return NULL;
        }
 #endif
 
@@ -853,7 +855,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
        stack_base = vma->vm_end - stack_expand;
 #endif
        current->mm->start_stack = bprm->p;
-       ret = expand_stack(vma, stack_base);
+       ret = expand_stack(vma, stack_base, true);
        if (ret)
                ret = -EFAULT;
 
index 27ce77080c79c7a026e641e491246fcf6f7e26c0..58826659c74473499415226f906dc4c83db22df6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3190,11 +3190,13 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 
 extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+int expand_stack(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked);
 
 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-extern int expand_downwards(struct vm_area_struct *vma,
-               unsigned long address);
+int expand_downwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked);
 #if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+int expand_upwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked);
 #else
@@ -3295,6 +3296,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 #endif
 
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
+               unsigned long addr, bool write_locked);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
index 13678edaa22c9005437a53965d6feccd457c5769..2f560181148d043403d0212800adf1a1d2a95cc2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1935,7 +1935,8 @@ static int acct_stack_growth(struct vm_area_struct *vma,
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next;
@@ -1959,6 +1960,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (gap_addr < address || gap_addr > TASK_SIZE)
                gap_addr = TASK_SIZE;
 
+       if (!write_locked)
+               return -EAGAIN;
        next = find_vma_intersection(mm, vma->vm_end, gap_addr);
        if (next && vma_is_accessible(next)) {
                if (!(next->vm_flags & VM_GROWSUP))
@@ -2028,7 +2031,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-int expand_downwards(struct vm_area_struct *vma, unsigned long address)
+int expand_downwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
        struct mm_struct *mm = vma->vm_mm;
        MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
@@ -2042,10 +2046,13 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
        /* Enforce stack_guard_gap */
        prev = mas_prev(&mas, 0);
        /* Check that both stack segments have the same anon_vma? */
-       if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
-                       vma_is_accessible(prev)) {
-               if (address - prev->vm_end < stack_guard_gap)
+       if (prev) {
+               if (!(prev->vm_flags & VM_GROWSDOWN) &&
+                   vma_is_accessible(prev) &&
+                   (address - prev->vm_end < stack_guard_gap))
                        return -ENOMEM;
+               if (!write_locked && (prev->vm_end == address))
+                       return -EAGAIN;
        }
 
        if (mas_preallocate(&mas, GFP_KERNEL))
@@ -2124,37 +2131,59 @@ static int __init cmdline_parse_stack_guard_gap(char *p)
 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
 
 #ifdef CONFIG_STACK_GROWSUP
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+int expand_stack(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
-       return expand_upwards(vma, address);
+       return expand_upwards(vma, address, write_locked);
 }
 
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+               unsigned long addr, bool write_locked)
 {
        struct vm_area_struct *vma, *prev;
+       int err = -ENOMEM;
 
        addr &= PAGE_MASK;
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       if (!prev)
+               return NULL;
+       if (write_locked) {
+               err = expand_stack(prev, addr, true);
+       } else {
+               mmap_read_unlock(mm);
+               mmap_write_lock(mm);
+               vma = find_vma_prev(mm, addr, &prev);
+               if (vma && (vma->vm_start <= addr)) {
+                       mmap_write_downgrade(mm);
+                       return vma;
+               }
+               if (prev)
+                       err = expand_stack(prev, addr, true);
+               mmap_write_downgrade(mm);
+       }
+       if (err)
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
        return prev;
 }
 #else
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+int expand_stack(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
-       return expand_downwards(vma, address);
+       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+               return -EINVAL;
+       return expand_downwards(vma, address, write_locked);
 }
 
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+               unsigned long addr, bool write_locked)
 {
        struct vm_area_struct *vma;
        unsigned long start;
+       int err;
 
        addr &= PAGE_MASK;
        vma = find_vma(mm, addr);
@@ -2162,10 +2191,17 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return NULL;
        if (vma->vm_start <= addr)
                return vma;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               return NULL;
        start = vma->vm_start;
-       if (expand_stack(vma, addr))
+       err = expand_stack(vma, addr, write_locked);
+       if (unlikely(err == -EAGAIN)) {
+               mmap_read_unlock(mm);
+               mmap_write_lock(mm);
+               vma = find_vma(mm, addr);
+               if (vma)
+                       err = expand_stack(vma, addr, true);
+               mmap_write_downgrade(mm);
+       }
+       if (err)
                return NULL;
        if (vma->vm_flags & VM_LOCKED)
                populate_vma_page_range(vma, addr, start, NULL);
@@ -2173,6 +2209,11 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 }
 #endif
 
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm,
+               unsigned long addr)
+{
+       return find_extend_vma_locked(mm, addr, false);
+}
 EXPORT_SYMBOL_GPL(find_extend_vma);
 
 /*
index f670d9979a26107304715cbcc68591114b8546ba..f4b4434c465fec91a1f58ffd29276ab9a014375d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -643,7 +643,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
  * expand a stack to a given address
  * - not supported under NOMMU conditions
  */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+int expand_stack(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
        return -ENOMEM;
 }