From eca626637758474f47295550cafa6e4a75083ef7 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Date: Thu, 26 Nov 2020 15:43:25 -0500
Subject: [PATCH] mm/mmap: Rework brk() to take interval tree locks when
 necessary

The anon_vma interval tree locks are necessary when expanding or
contracting a VMA, so take them.  This path is also rather slow and
cannot be taken if the anon_vma_chain is not a singular list.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/mmap.c | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index b103e7b4b0d0..6b5fdaffb7b9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -236,7 +236,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 		mm->brk = brk;
 		goto success;
 	}
-
+	if (mm->brk > mm->start_brk)
+		mas_set(&mas, mm->brk - 1);
 	brkvma = mas_walk(&mas);
 	if (brkvma) {
@@ -2965,7 +2966,10 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
 	arch_unmap(mm, newbrk, oldbrk);
 
-	if (vma->vm_start >= newbrk) { // remove entire mapping(s)
+	if (likely(vma->vm_start >= newbrk)) { // remove entire mapping(s)
+		mas_set(mas, newbrk);
+		if (vma->vm_start != newbrk)
+			mas_reset(mas); // cause a re-walk for the first overlap.
 		ret = do_mas_munmap(mas, mm, newbrk, oldbrk-newbrk, uf, true);
 		goto munmap_full_vma;
 	}
@@ -2980,10 +2984,16 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
 	// Change the oldbrk of vma to the newbrk of the munmap area
 	vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0);
+	anon_vma_lock_write(vma->anon_vma);
+	anon_vma_interval_tree_pre_update_vma(vma);
+
 	vma->vm_end = newbrk;
 	if (vma_mas_remove(&unmap, mas))
 		goto mas_store_fail;
 
+	anon_vma_interval_tree_post_update_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
+
 	unmap_pages = vma_pages(&unmap);
 	if (unmap.vm_flags & VM_LOCKED) {
 		mm->locked_vm -= unmap_pages;
@@ -3003,6 +3013,8 @@ munmap_full_vma:
 
 mas_store_fail:
 	vma->vm_end = oldbrk;
+	anon_vma_interval_tree_post_update_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	return -ENOMEM;
 }
 
@@ -3051,17 +3063,24 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	if (brkvma) {
+	if (brkvma && *brkvma) {
 		vma = *brkvma;
-		/* Fast path, expand the existing vma if possible */
-		if (vma && ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+		/* Expand the existing vma if possible; anon_vma_chain is
+		 * almost never a singular list, so this will almost always fail. */
+		if (list_is_singular(&vma->anon_vma_chain) &&
+		    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+			anon_vma_lock_write(vma->anon_vma);
+			anon_vma_interval_tree_pre_update_vma(vma);
 			vma->vm_end = addr + len;
 			mas->index = vma->vm_start;
 			mas->last = vma->vm_end - 1;
 			if (mas_store_gfp(mas, vma, GFP_KERNEL))
 				goto mas_mod_fail;
+			anon_vma_interval_tree_post_update_vma(vma);
+			anon_vma_unlock_write(vma->anon_vma);
 			goto out;
 		}
+		prev = vma;
 	}
 
 	/* create a vma struct for an anonymous mapping */
@@ -3078,9 +3097,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	if (vma_mas_store(vma, mas))
 		goto mas_store_fail;
 
-	prev = mas_prev(mas, 0);
+	if (!prev)
+		prev = mas_prev(mas, 0);
 	__vma_link_list(mm, vma, prev);
-	__vma_link_file(vma);
 	mm->map_count++;
 	*brkvma = vma;
 out:
@@ -3101,6 +3120,8 @@ vma_alloc_fail:
 
 mas_mod_fail:
	vma->vm_end = addr;
+	anon_vma_interval_tree_post_update_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	return -ENOMEM;
 }
-- 
2.50.1
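
Note (not part of the patch): the brk() hunk relies on re-positioning an
already-initialized maple tree state rather than starting a fresh lookup.
A minimal sketch of that pattern, assuming the maple tree API from this
series (MA_STATE(), mas_set(), mas_walk()); the variable names are
illustrative only:

	MA_STATE(mas, &mm->mm_mt, mm->brk - 1, mm->brk - 1);
	struct vm_area_struct *brkvma;

	/* Walk to the VMA containing the byte below the current break;
	 * NULL means that address is unmapped. */
	brkvma = mas_walk(&mas);

	/* mas_set() re-aims the same state at a new index and forces the
	 * next walk to start over from the root. */
	mas_set(&mas, newbrk);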
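
Note (not part of the patch): all three modified sites follow the same
expand/contract protocol around the anon_vma interval tree. A minimal
sketch of that protocol, assuming the static helpers in mm/mmap.c
(anon_vma_interval_tree_pre_update_vma()/post_update_vma()) and a
non-NULL vma->anon_vma; the function name is hypothetical:

	static int example_expand_brk_vma(struct ma_state *mas,
					  struct vm_area_struct *vma,
					  unsigned long new_end)
	{
		unsigned long old_end = vma->vm_end;

		/* Take the VMA out of every anon_vma interval tree before
		 * the range changes, so concurrent rmap walks never see a
		 * stale interval. */
		anon_vma_lock_write(vma->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);

		vma->vm_end = new_end;
		mas->index = vma->vm_start;
		mas->last = vma->vm_end - 1;
		if (mas_store_gfp(mas, vma, GFP_KERNEL)) {
			/* Allocation failure: restore the old range before
			 * re-inserting into the interval tree. */
			vma->vm_end = old_end;
			anon_vma_interval_tree_post_update_vma(vma);
			anon_vma_unlock_write(vma->anon_vma);
			return -ENOMEM;
		}

		/* Re-insert with the new range, then drop the lock. */
		anon_vma_interval_tree_post_update_vma(vma);
		anon_vma_unlock_write(vma->anon_vma);
		return 0;
	}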