mm: Change munmap splitting order and move_vma()
author     Liam R. Howlett <Liam.Howlett@oracle.com>    Wed, 30 Mar 2022 17:35:49 +0000 (13:35 -0400)
committer  Liam R. Howlett <Liam.Howlett@oracle.com>    Fri, 26 Aug 2022 17:57:08 +0000 (13:57 -0400)
Splitting can be more efficient when done in the reverse order to
minimize VMA walking.  Change do_mas_align_munmap() to reduce walking of
the tree during split operations.

move_vma() must also be altered to remove the dependency on keeping the
original VMA as the active part of the split.  Look up the new VMA or
two if necessary.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
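
For context, the path being changed is hit whenever an unmap range lands
mid-VMA, so that do_mas_align_munmap() must split the first VMA
(start > vma->vm_start) and/or the last one (next->vm_end > end).  The
minimal userspace sketch below is not part of the patch; it simply
exercises that case by punching a hole in the middle of a single
anonymous VMA, splitting it on both sides:

/*
 * Illustration only (not from the patch): a munmap() that is not
 * aligned to VMA boundaries forces the kernel to split the first
 * and last VMA covering the range.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	/* One VMA covering three pages. */
	char *map = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/*
	 * Unmap the middle page: the range starts above vm_start and
	 * ends below vm_end, so both a front and a back split are
	 * needed, and two VMAs survive.
	 */
	if (munmap(map + page, page)) {
		perror("munmap");
		return EXIT_FAILURE;
	}

	printf("hole punched at %p\n", (void *)(map + page));
	return EXIT_SUCCESS;
}

As far as one can read from the hunks below, the saved walking comes from
flipping the new_below argument of __split_vma(): the piece that lies
inside the unmap range stays in vma (or next), so the old
mas_walk()/mas_prev() re-walks after each split are no longer needed.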
mm/mmap.c
mm/mremap.c

index 6445fd386f0457d4b1855670481a61a4a4d7bc39..fd97e0f53fff5a50e7e7c0cd2ce91fe5ad7ef757 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2314,14 +2314,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
                return -ENOMEM;
 
        mas->last = end - 1;
-       /*
-        * If we need to split any vma, do it now to save pain later.
-        *
-        * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
-        * unmapped vm_area_struct will remain in use: so lower split_vma
-        * places tmp vma above, and higher split_vma places tmp vma below.
-        */
-
+       /* If we need to split any vma, do it now to save pain later. */
        /* Does it split the first one? */
        if (start > vma->vm_start) {
 
@@ -2337,18 +2330,18 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
                 * mas_pause() is not needed since mas->index needs to be set
                 * differently than vma->vm_end anyways.
                 */
-               error = __split_vma(mm, vma, start, 0);
+               error = __split_vma(mm, vma, start, 1);
                if (error)
                        goto start_split_failed;
 
-               mas_set(mas, start);
-               vma = mas_walk(mas);
+               mas_set(mas, start - 1);
+               prev = mas_walk(mas);
+       } else {
+               prev = mas_prev(mas, 0);
+               if (unlikely((!prev)))
+                       mas_set(mas, start);
        }
 
-       prev = mas_prev(mas, 0);
-       if (unlikely((!prev)))
-               mas_set(mas, start);
-
        /*
         * Detach a range of VMAs from the mm. Using next as a temp variable as
         * it is always overwritten.
@@ -2356,27 +2349,16 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        mas_for_each(mas, next, end - 1) {
                /* Does it split the end? */
                if (next->vm_end > end) {
-                       struct vm_area_struct *split;
-
-                       error = __split_vma(mm, next, end, 1);
+                       error = __split_vma(mm, next, end, 0);
                        if (error)
                                goto end_split_failed;
 
                        mas_set(mas, end);
-                       split = mas_prev(mas, 0);
-                       error = munmap_sidetree(split, &mas_detach);
-                       if (error)
-                               goto munmap_sidetree_failed;
-
-                       count++;
-                       if (vma == next)
-                               vma = split;
-                       break;
                }
+
                error = munmap_sidetree(next, &mas_detach);
                if (error)
                        goto munmap_sidetree_failed;
-
                count++;
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
                BUG_ON(next->vm_start < start);
@@ -2384,9 +2366,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 #endif
        }
 
-       if (!next)
-               next = mas_next(mas, ULONG_MAX);
-
+       next = mas_find(mas, ULONG_MAX);
        if (unlikely(uf)) {
                /*
                 * If userfaultfd_unmap_prep returns an error the vmas
index e465ffe279bb03ac38d20bf5b83ae1d4eb700c3c..b7d56d6d34a4fe29ee7d12963c9bb0728328066b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -580,9 +580,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
-       unsigned long excess = 0;
+       unsigned long account_start = 0;
+       unsigned long account_end = 0;
        unsigned long hiwater_vm;
-       int split = 0;
        int err = 0;
        bool need_rmap_locks;
 
@@ -662,10 +662,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
                vma->vm_flags &= ~VM_ACCOUNT;
-               excess = vma->vm_end - vma->vm_start - old_len;
-               if (old_addr > vma->vm_start &&
-                   old_addr + old_len < vma->vm_end)
-                       split = 1;
+               if (vma->vm_start < old_addr)
+                       account_start = vma->vm_start;
+               if (vma->vm_end > old_addr + old_len)
+                       account_end = vma->vm_end - 1;
        }
 
        /*
@@ -704,7 +704,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
                /* OOM: unable to split vma, just get accounts right */
                if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
                        vm_acct_memory(old_len >> PAGE_SHIFT);
-               excess = 0;
+               account_start = account_end = 0;
        }
 
        if (vm_flags & VM_LOCKED) {
@@ -715,10 +715,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        mm->hiwater_vm = hiwater_vm;
 
        /* Restore VM_ACCOUNT if one or two pieces of vma left */
-       if (excess) {
+       if (account_start) {
+               vma = vma_lookup(mm, account_start);
+               vma->vm_flags |= VM_ACCOUNT;
+       }
+
+       if (account_end) {
+               vma = vma_lookup(mm, account_end);
                vma->vm_flags |= VM_ACCOUNT;
-               if (split)
-                       find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
        }
 
        return new_addr;
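
The mm/mremap.c half can likewise be exercised from userspace.  A hedged
sketch (again, not part of the patch): moving only the middle of a larger
accounted mapping with mremap() leaves a fragment on each side of the old
range, and those are exactly the pieces on which the new
account_start/account_end lookups restore VM_ACCOUNT.

/*
 * Illustration only (not from the patch): mremap() of the middle of a
 * writable private mapping (normally accounted, i.e. VM_ACCOUNT set)
 * leaves a VMA fragment on each side of the moved range.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	/* One accounted VMA covering three pages. */
	char *old = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Placeholder destination for MREMAP_FIXED. */
	char *dst = mmap(NULL, page, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED || dst == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/*
	 * Move the middle page only: vma->vm_start < old_addr and
	 * vma->vm_end > old_addr + old_len, so move_vma() must restore
	 * VM_ACCOUNT on both leftover pieces.
	 */
	if (mremap(old + page, page, page,
		   MREMAP_MAYMOVE | MREMAP_FIXED, dst) == MAP_FAILED) {
		perror("mremap");
		return EXIT_FAILURE;
	}

	printf("middle page moved to %p\n", (void *)dst);
	return EXIT_SUCCESS;
}

The behavioural point of the rewrite is visible in the hunk above: the
old code assumed the original vma was still a live piece of the split and
reached its neighbour via find_vma(), while the new code re-finds each
leftover piece with vma_lookup() at an address saved before the move.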