mm/mmap: Fix unnecessary locking of anon_vma on expanding of a vma.
author    Liam R. Howlett <Liam.Howlett@Oracle.com>
Tue, 20 Oct 2020 18:59:58 +0000 (14:59 -0400)
committer Liam R. Howlett <Liam.Howlett@Oracle.com>
Tue, 5 Jan 2021 17:30:36 +0000 (12:30 -0500)
When expanding the end of a vma, it is unnecessary to lock the anon_vma
tree for writing because the spinlock on the vma tree ensures that there
are no concurrent vma expansions.
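
For reference, a minimal sketch of the resulting split, using only the
helpers visible in the diff below (vma_mt_szero(), vma_mt_store(), and
the anon_vma interval tree hooks are names from this tree; the two
wrapper functions here are hypothetical, for illustration only):

/*
 * Shrinking still takes the anon_vma write lock so rmap walkers see a
 * consistent interval tree.  Expanding relies on the vma (maple) tree's
 * internal spinlock to exclude concurrent stores.
 */
static void brk_shrink_sketch(struct vm_area_struct *vma,
			      unsigned long newbrk)
{
	if (vma->anon_vma) {
		anon_vma_lock_write(vma->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
	}
	/* Clear the range [newbrk, vm_end) from the vma tree. */
	vma_mt_szero(vma->vm_mm, newbrk, vma->vm_end);
	vma->vm_end = newbrk;
	vmacache_invalidate(vma->vm_mm);
	if (vma->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		anon_vma_unlock_write(vma->anon_vma);
	}
}

static void brk_expand_sketch(struct vm_area_struct *vma,
			      unsigned long new_end)
{
	/* No anon_vma lock: the tree spinlock serializes the store. */
	vma->vm_end = new_end;
	vma_mt_store(vma->vm_mm, vma);
}

The old vma_mt_brk() took the anon_vma lock on both paths; only the
shrink path needs it, so the helper is dissolved into its two callers.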

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
mm/mmap.c

index 362698fac7ebc4b5ac148a6c6645fe91a8759147..728c62da9f914a966ae53f71cbb2f2d501a6c72b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -242,7 +242,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                 * Always allow shrinking brk.
                 * do_brk_munmap() may downgrade mmap_lock to read.
                 */
-               if (brk <= mm->brk) {
+               if (brk <= mm->brk) { // newbrk < oldbrk
                        int ret;
                        /*
                         * mm->brk must be protected by write mmap_lock.
@@ -618,35 +618,6 @@ void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
        vma_mt_store(mm, vma);
 }
 
-/* vma_mt_brk() - Change the brk vma.
- * */
-void vma_mt_brk(struct vm_area_struct *vma, unsigned long new_end)
-{
-       bool store = false;
-
-       if (vma->anon_vma) {
-               anon_vma_lock_write(vma->anon_vma);
-               anon_vma_interval_tree_pre_update_vma(vma);
-       }
-
-       // changing the back.
-       if (vma->vm_end > new_end) {
-               vma_mt_szero(vma->vm_mm, new_end, vma->vm_end);
-               vmacache_invalidate(vma->vm_mm);
-       } else
-               store = true;
-
-       vma->vm_end = new_end;
-       if (store)
-               vma_mt_store(vma->vm_mm, vma);
-
-
-       if (vma->anon_vma) {
-               anon_vma_interval_tree_post_update_vma(vma);
-               anon_vma_unlock_write(vma->anon_vma);
-       }
-}
-
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct vm_area_struct *prev)
 {
@@ -2933,7 +2904,17 @@ static int do_brk_munmap(struct vm_area_struct *vma, unsigned long newbrk,
 
        // Change the oldbrk of vma to the newbrk of the munmap area
        vma_adjust_trans_huge(vma, vma->vm_start, newbrk, 0);
-       vma_mt_brk(vma, newbrk);
+       if (vma->anon_vma) {
+               anon_vma_lock_write(vma->anon_vma);
+               anon_vma_interval_tree_pre_update_vma(vma);
+       }
+       vma_mt_szero(vma->vm_mm, newbrk, vma->vm_end);
+       vma->vm_end = newbrk;
+       vmacache_invalidate(vma->vm_mm);
+       if (vma->anon_vma) {
+               anon_vma_interval_tree_post_update_vma(vma);
+               anon_vma_unlock_write(vma->anon_vma);
+       }
 
        unmap_pages = vma_pages(&unmap);
        if (unmap.vm_flags & VM_LOCKED) {
@@ -3008,7 +2989,8 @@ static int do_brk_flags(struct vm_area_struct **brkvma, unsigned long addr,
                vma = *brkvma;
                /* Fast path, expand the existing vma if possible */
                if (vma && ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)){
-                       vma_mt_brk(vma, addr + len);
+                       vma->vm_end = addr + len;
+                       vma_mt_store(vma->vm_mm, vma);
                        goto out;
                }
        }