munmap
author    Liam R. Howlett <Liam.Howlett@oracle.com>
          Tue, 1 Mar 2022 20:17:48 +0000 (15:17 -0500)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
          Tue, 8 Mar 2022 18:19:15 +0000 (13:19 -0500)
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/mmap.c

index 5decb8782e042faac96fd7edf409828a52f5c7ae..74a4608ac64f7a0ebc0f9e4a588bc4fe90b04fed 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2286,6 +2286,16 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void munmap_sidetree(struct vm_area_struct *vma,
+                                  struct ma_state *mas_detach)
+{
+       vma_mas_store(vma, mas_detach);
+       if (vma->vm_flags & VM_LOCKED) {
+               vma->vm_mm->locked_vm -= vma_pages(vma);
+               munlock_vma_pages_all(vma);
+       }
+}
+
 /*
  * do_mas_align_munmap() - munmap the aligned region from @start to @end.
  * @mas: The maple_state, ideally set up to alter the correct tree location.
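The new munmap_sidetree() helper stores each VMA slated for removal in the detached tree and, for VM_LOCKED mappings, subtracts the pages from mm->locked_vm and munlocks them up front. As a rough sketch of how the side tree might be drained once the real tree has been updated (hypothetical code, not part of this patch; remove_vma() stands in for whatever per-VMA cleanup the caller performs):

        /* Hypothetical: walk the detached VMAs gathered above. */
        MA_STATE(mas_detach, &mt_detach, 0, 0);
        struct vm_area_struct *vma;

        mas_for_each(&mas_detach, vma, ULONG_MAX)
                remove_vma(vma);        /* free the detached VMA */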
@@ -2303,7 +2313,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
                    struct mm_struct *mm, unsigned long start,
                    unsigned long end, struct list_head *uf, bool downgrade)
 {
-       struct vm_area_struct *prev, *next;
+       struct vm_area_struct *prev, *next = NULL;
        struct maple_tree mt_detach;
        int count = 0;
        MA_STATE(mas_detach, &mt_detach, start, end - 1);
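The unmapped VMAs are collected in mt_detach, a maple tree local to this call whose locking is delegated to the caller-held mmap_lock. A minimal sketch of that setup (the mt_init_flags() call is an assumption, as it falls outside the hunk context shown here; MT_FLAGS_LOCK_EXTERN marks the tree's lock as externally supplied):

        struct maple_tree mt_detach;
        MA_STATE(mas_detach, &mt_detach, start, end - 1);

        mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
        mt_set_external_lock(&mt_detach, &mm->mmap_lock);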
@@ -2311,7 +2321,6 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        mt_set_external_lock(&mt_detach, &mm->mmap_lock);
 
        mas->last = end - 1;
-       prev = next = NULL;
        /*
         * If we need to split any vma, do it now to save pain later.
         *
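Initializing next to NULL at declaration (and dropping the old prev = next = NULL statement) is what makes the post-loop fixup below work: mas_for_each() expands to a while loop around mas_find(), so next is left NULL when the walk runs off the end of the range, whereas the early break taken after splitting at end leaves next pointing at the VMA that now starts at end. A condensed sketch of the pattern (illustrative only, using the same iterator API):

        next = NULL;
        mas_for_each(mas, next, end - 1) {
                if (next->vm_end > end) {
                        /* ...split at end, record the lower half... */
                        break;          /* next stays non-NULL */
                }
                /* ...record next in the side tree... */
        }
        if (!next)                      /* loop ran off the range */
                next = mas_next(mas, ULONG_MAX);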
@@ -2355,31 +2364,32 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        mas_for_each(mas, next, end - 1) {
                /* Does it split the end? */
                if (next->vm_end > end) {
+                       struct vm_area_struct *split;
                        int error;
-                       struct vm_area_struct *tmp = next;
 
                        error = __split_vma(mm, next, end, 1);
                        if (error)
                                return error;
 
                        mas_set(mas, end);
-                       next = mas_prev(mas, 0);
-                       if (tmp == vma)
-                               vma = next;
+                       split = mas_prev(mas, 0);
+                       munmap_sidetree(split, &mas_detach);
+                       count++;
+                       if (vma == next)
+                               vma = split;
+                       break;
                }
                count++;
+               munmap_sidetree(next, &mas_detach);
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
                BUG_ON(next->vm_start < start);
                BUG_ON(next->vm_start > end);
 #endif
-               vma_mas_store(next, &mas_detach);
-               if (next->vm_flags & VM_LOCKED) {
-                       mm->locked_vm -= vma_pages(next);
-                       munlock_vma_pages_all(next);
-               }
        }
 
-       next = mas_find(mas, ULONG_MAX);
+       if (!next)
+               next = mas_next(mas, ULONG_MAX);
+
        if (unlikely(uf)) {
                /*
                 * If userfaultfd_unmap_prep returns an error the vmas