mm: Change munmap splitting order and move_vma()
author Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 30 Mar 2022 17:35:49 +0000 (13:35 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 28 Apr 2022 15:18:01 +0000 (11:18 -0400)
Splitting can be more efficient when done in the reverse order to
minimize VMA walking.  Change do_mas_align_munmap() to reduce walking of
the tree during split operations.
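A rough userspace sketch of the ordering (not kernel code: the array, the walk
counter and the split() helper below are invented stand-ins for the maple tree,
mas_walk()/mas_prev() and __split_vma()).  It only illustrates the saving: when
the first split keeps the new piece below 'start', a single lookup at start - 1
both finds prev and leaves the iterator just ahead of the range to unmap,
instead of walking to the split VMA and then stepping back as before.

/* Toy model: a sorted array stands in for the maple tree and every
 * lookup is counted, so the old and new split orderings can be
 * compared directly.  All names here are made up for illustration. */
#include <stdio.h>

struct range { unsigned long start, end; };

static struct range tree[8] = {
	{ 0x1000, 0x5000 },	/* VMA that the unmap range starts inside */
	{ 0x5000, 0x9000 },
};
static int nr = 2, walks;

/* Linear scan standing in for mas_walk()/mas_prev(). */
static int walk(unsigned long addr)
{
	walks++;
	for (int i = 0; i < nr; i++)
		if (addr >= tree[i].start && addr < tree[i].end)
			return i;
	return -1;
}

/* Split tree[i] at addr into [start, addr) and [addr, end). */
static void split(int i, unsigned long addr)
{
	for (int j = nr; j > i + 1; j--)
		tree[j] = tree[j - 1];
	tree[i + 1] = (struct range){ addr, tree[i].end };
	tree[i].end = addr;
	nr++;
}

int main(void)
{
	unsigned long start = 0x3000;

	split(walk(start), start);

	/* Old order: walk to the piece starting at 'start', then step
	 * back to prev -- two tree operations. */
	walks = 0;
	walk(start);
	walk(start - 1);
	printf("old order: %d walks\n", walks);

	/* New order: one lookup at start - 1 finds prev directly. */
	walks = 0;
	walk(start - 1);
	printf("new order: %d walks\n", walks);
	return 0;
}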

move_vma() must also be altered to remove the dependency on keeping the
original VMA as the active part of the split.  Look up the new VMA or
two if necessary.
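A similarly rough userspace sketch of the reworked accounting (invented types
and helpers; the real code records the addresses before the unmap and then uses
vma_lookup() because the original vma pointer may no longer refer to the
surviving pieces): remember an address inside each leftover piece up front,
then restore VM_ACCOUNT on whichever pieces still exist afterwards.

/* Toy model of the account_start/account_end bookkeeping; struct toy_vma,
 * lookup() and the hard-coded layout are made up for illustration. */
#include <stdio.h>

#define VM_ACCOUNT 0x1UL

struct toy_vma { unsigned long start, end, flags; };

static struct toy_vma map[4];
static int nr;

/* Address lookup standing in for vma_lookup(). */
static struct toy_vma *lookup(unsigned long addr)
{
	for (int i = 0; i < nr; i++)
		if (addr >= map[i].start && addr < map[i].end)
			return &map[i];
	return NULL;
}

int main(void)
{
	unsigned long old_addr = 0x3000, old_len = 0x2000;
	struct toy_vma vma = { 0x1000, 0x7000, VM_ACCOUNT };
	unsigned long account_start = 0, account_end = 0;

	/* Before unmapping: remember an address inside each piece that
	 * will be left behind, instead of a split flag tied to the old
	 * vma pointer. */
	if (vma.start < old_addr)
		account_start = vma.start;
	if (vma.end > old_addr + old_len)
		account_end = vma.end - 1;

	/* After the unmap, the leftovers are two separate VMAs. */
	map[nr++] = (struct toy_vma){ 0x1000, 0x3000, 0 };
	map[nr++] = (struct toy_vma){ 0x5000, 0x7000, 0 };

	/* Restore VM_ACCOUNT on whichever pieces still exist. */
	if (account_start && lookup(account_start))
		lookup(account_start)->flags |= VM_ACCOUNT;
	if (account_end && lookup(account_end))
		lookup(account_end)->flags |= VM_ACCOUNT;

	for (int i = 0; i < nr; i++)
		printf("[%#lx, %#lx) flags=%#lx\n",
		       map[i].start, map[i].end, map[i].flags);
	return 0;
}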

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/mmap.c
mm/mremap.c

index f52bbf9aa0245fb128990ea3f813440f63738f7f..b39f1b4f79db976df0e450c0bea8906b198938aa 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2338,14 +2338,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        mt_set_external_lock(&mt_detach, &mm->mmap_lock);
 
        mas->last = end - 1;
-       /*
-        * If we need to split any vma, do it now to save pain later.
-        *
-        * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
-        * unmapped vm_area_struct will remain in use: so lower split_vma
-        * places tmp vma above, and higher split_vma places tmp vma below.
-        */
-
+       /* If we need to split any vma, do it now to save pain later. */
        /* Does it split the first one? */
        if (start > vma->vm_start) {
                int error;
@@ -2362,18 +2355,18 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
                 * mas_pause() is not needed since mas->index needs to be set
                 * differently than vma->vm_end anyways.
                 */
-               error = __split_vma(mm, vma, start, 0);
+               error = __split_vma(mm, vma, start, 1);
                if (error)
                        return error;
 
-               mas_set(mas, start);
-               vma = mas_walk(mas);
+               mas_set(mas, start - 1);
+               prev = mas_walk(mas);
+       } else {
+               prev = mas_prev(mas, 0);
+               if (unlikely((!prev)))
+                       mas_set(mas, start);
        }
 
-       prev = mas_prev(mas, 0);
-       if (unlikely((!prev)))
-               mas_set(mas, start);
-
        /*
         * Detach a range of VMAs from the mm. Using next as a temp variable as
         * it is always overwritten.
@@ -2381,20 +2374,13 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        mas_for_each(mas, next, end - 1) {
                /* Does it split the end? */
                if (next->vm_end > end) {
-                       struct vm_area_struct *split;
                        int error;
 
-                       error = __split_vma(mm, next, end, 1);
+                       error = __split_vma(mm, next, end, 0);
                        if (error)
                                return error;
 
                        mas_set(mas, end);
-                       split = mas_prev(mas, 0);
-                       munmap_sidetree(split, &mas_detach);
-                       count++;
-                       if (vma == next)
-                               vma = split;
-                       break;
                }
                count++;
                munmap_sidetree(next, &mas_detach);
@@ -2404,9 +2390,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 #endif
        }
 
-       if (!next)
-               next = mas_next(mas, ULONG_MAX);
-
+       next = mas_find(mas, ULONG_MAX);
        if (unlikely(uf)) {
                /*
                 * If userfaultfd_unmap_prep returns an error the vmas
index c0d32330d43532c630776c601a05b2a0214fa353..8d9f89e49bd82d84767fa90ee95136980ba23aa3 100644 (file)
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -578,9 +578,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
-       unsigned long excess = 0;
+       unsigned long account_start = 0;
+       unsigned long account_end = 0;
        unsigned long hiwater_vm;
-       int split = 0;
        int err = 0;
        bool need_rmap_locks;
 
@@ -660,10 +660,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
                vma->vm_flags &= ~VM_ACCOUNT;
-               excess = vma->vm_end - vma->vm_start - old_len;
-               if (old_addr > vma->vm_start &&
-                   old_addr + old_len < vma->vm_end)
-                       split = 1;
+               if (vma->vm_start < old_addr)
+                       account_start = vma->vm_start;
+               if (vma->vm_end > old_addr + old_len)
+                       account_end = vma->vm_end - 1;
        }
 
        /*
@@ -702,7 +702,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
                /* OOM: unable to split vma, just get accounts right */
                if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
                        vm_acct_memory(old_len >> PAGE_SHIFT);
-               excess = 0;
+               account_start = account_end = 0;
        }
 
        if (vm_flags & VM_LOCKED) {
@@ -713,10 +713,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        mm->hiwater_vm = hiwater_vm;
 
        /* Restore VM_ACCOUNT if one or two pieces of vma left */
-       if (excess) {
+       if (account_start) {
+               vma = vma_lookup(mm, account_start);
+               vma->vm_flags |= VM_ACCOUNT;
+       }
+
+       if (account_end) {
+               vma = vma_lookup(mm, account_end);
                vma->vm_flags |= VM_ACCOUNT;
-               if (split)
-                       find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
        }
 
        return new_addr;