From: Liam R. Howlett
Date: Tue, 20 Oct 2020 18:59:58 +0000 (-0400)
Subject: mm/mmap: Fix unnecessary locking of anon_vma on expanding of a vma.
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=55340497ce91f89e3bcd5d085cc3503d8beadf15;p=users%2Fjedix%2Flinux-maple.git

mm/mmap: Fix unnecessary locking of anon_vma on expanding of a vma.

When expanding the end of a vma, it is unnecessary to lock the anon_vma
tree for writing because the spinlock on the vma tree ensures that there
are no concurrent vma expansions.

Signed-off-by: Liam R. Howlett
---

diff --git a/mm/mmap.c b/mm/mmap.c
index 362698fac7eb..728c62da9f91 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -242,7 +242,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	 * Always allow shrinking brk.
 	 * do_brk_munmap() may downgrade mmap_lock to read.
 	 */
-	if (brk <= mm->brk) {
+	if (brk <= mm->brk) {	// newbrk < oldbrk
 		int ret;
 		/*
 		 * mm->brk must to be protected by write mmap_lock.
@@ -618,35 +618,6 @@ void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
 	vma_mt_store(mm, vma);
 }
 
-/* vma_mt_brk() - Change the brk vma.
- * */
-void vma_mt_brk(struct vm_area_struct *vma, unsigned long new_end)
-{
-	bool store = false;
-
-	if (vma->anon_vma) {
-		anon_vma_lock_write(vma->anon_vma);
-		anon_vma_interval_tree_pre_update_vma(vma);
-	}
-
-	// changing the back.
-	if (vma->vm_end > new_end) {
-		vma_mt_szero(vma->vm_mm, new_end, vma->vm_end);
-		vmacache_invalidate(vma->vm_mm);
-	} else
-		store = true;
-
-	vma->vm_end = new_end;
-	if (store)
-		vma_mt_store(vma->vm_mm, vma);
-
-
-	if (vma->anon_vma) {
-		anon_vma_interval_tree_post_update_vma(vma);
-		anon_vma_unlock_write(vma->anon_vma);
-	}
-}
-
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 		     struct vm_area_struct *prev)
 {
@@ -2933,7 +2904,17 @@ static int do_brk_munmap(struct vm_area_struct *vma, unsigned long newbrk,
 
 	// Change the oldbrk of vma to the newbrk of the munmap area
 	vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0);
-	vma_mt_brk(vma, newbrk);
+	if (vma->anon_vma) {
+		anon_vma_lock_write(vma->anon_vma);
+		anon_vma_interval_tree_pre_update_vma(vma);
+	}
+	vma_mt_szero(vma->vm_mm, newbrk, vma->vm_end);
+	vma->vm_end = newbrk;
+	vmacache_invalidate(vma->vm_mm);
+	if (vma->anon_vma) {
+		anon_vma_interval_tree_post_update_vma(vma);
+		anon_vma_unlock_write(vma->anon_vma);
+	}
 
 	unmap_pages = vma_pages(&unmap);
 	if (unmap.vm_flags & VM_LOCKED) {
@@ -3008,7 +2989,8 @@ static int do_brk_flags(struct vm_area_struct **brkvma, unsigned long addr,
 		vma = *brkvma;
 		/* Fast path, expand the existing vma if possible */
 		if (vma && ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)){
-			vma_mt_brk(vma, addr + len);
+			vma->vm_end = addr + len;
+			vma_mt_store(vma->vm_mm, vma);
 			goto out;
 		}
 	}
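
Note for readers outside the kernel tree: the locking split this patch
ends up with (tree lock only on expand, anon_vma write lock kept on
shrink) can be modelled in a few lines of userspace C. The sketch below
is a hypothetical stand-in, not kernel code: fake_vma, tree_lock,
anon_rwlock, expand_vma and shrink_vma are invented names, with a
pthread spinlock playing the vma (maple) tree lock and a pthread rwlock
playing anon_vma_lock_write()/anon_vma_unlock_write().

/*
 * Hypothetical userspace model of the locking rule in this patch.
 * Nothing here is a kernel API; the names are stand-ins only.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
	int has_anon_vma;	/* stands in for vma->anon_vma != NULL */
};

static pthread_spinlock_t tree_lock;	/* models the vma tree spinlock */
static pthread_rwlock_t anon_rwlock = PTHREAD_RWLOCK_INITIALIZER;
					/* models the anon_vma lock */

/*
 * Expand: only the tree lock is taken.  Per the commit message, the
 * spinlock on the vma tree already serializes expansions, so the
 * anon_vma interval tree needs no write lock on this path.
 */
static void expand_vma(struct fake_vma *vma, unsigned long new_end)
{
	pthread_spin_lock(&tree_lock);	/* models the vma_mt_store() path */
	vma->vm_end = new_end;
	pthread_spin_unlock(&tree_lock);
}

/*
 * Shrink: the anon_vma interval tree is still updated around the
 * change, so the write lock stays, as in do_brk_munmap() above.
 */
static void shrink_vma(struct fake_vma *vma, unsigned long new_end)
{
	if (vma->has_anon_vma)
		pthread_rwlock_wrlock(&anon_rwlock);

	pthread_spin_lock(&tree_lock);	/* models the vma_mt_szero() path */
	vma->vm_end = new_end;
	pthread_spin_unlock(&tree_lock);

	if (vma->has_anon_vma)
		pthread_rwlock_unlock(&anon_rwlock);
}

int main(void)
{
	struct fake_vma brk_vma = { 0x1000, 0x2000, 1 };

	pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);
	expand_vma(&brk_vma, 0x3000);	/* like the do_brk_flags() fast path */
	shrink_vma(&brk_vma, 0x1800);	/* like the do_brk_munmap() path */
	printf("vm_end now %#lx\n", brk_vma.vm_end);
	return 0;
}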