broken maple_wip
author	Liam R. Howlett <Liam.Howlett@Oracle.com>	Fri, 5 Mar 2021 14:33:47 +0000 (09:33 -0500)
committer	Liam R. Howlett <Liam.Howlett@Oracle.com>	Fri, 5 Mar 2021 14:33:47 +0000 (09:33 -0500)
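
Push the maple tree locking out of the vma_mas_store()/vma_mas_remove()
helpers and into their callers.  brk(), vma_link(), __insert_vm_struct(),
__vma_adjust(), the munmap paths, mmap_region(), vm_brk_flags(),
exit_mmap(), and mm_take_all_locks()/mm_drop_all_locks() now take
mas_lock() around their tree updates, e.g. vma_link():

	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);

	mas_lock(&mas);
	vma_mas_link(mm, vma, &mas);
	mas_unlock(&mas);

Add vma_shrink() and mas_split_vma() so the munmap path can split VMAs
while reusing an existing maple state, rework detach_range() to walk
backwards with mas_prev(), and make mas_store()/mas_store_gfp() re-walk
the tree after a spanning store instead of resetting to MAS_START.
Debug printk()s and an mt_dump() are left in; this is a broken
work-in-progress snapshot.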
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
include/linux/interval_tree_generic.h
lib/maple_tree.c
mm/internal.h
mm/mmap.c

index aaa8a0767aa3a512c978d047556af3cee07af66f..0cc96cc5d4b68bafcd966683a84ca3b52d163d86 100644 (file)
@@ -42,6 +42,7 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node,                           \
        ITTYPE start = ITSTART(node), last = ITLAST(node);                    \
        ITSTRUCT *parent;                                                     \
        bool leftmost = true;                                                 \
+       printk("ack\n");\
                                                                              \
        while (*link) {                                                       \
                rb_parent = *link;                                            \
@@ -60,6 +61,7 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node,                           \
        rb_link_node(&node->ITRB, rb_parent, link);                           \
        rb_insert_augmented_cached(&node->ITRB, root,                         \
                                   leftmost, &ITPREFIX ## _augment);          \
+       printk("done\n");\
 }                                                                            \
                                                                              \
 ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node,                            \
index 40e3321a43f8f1312bc97b98b143f738c08c57e2..16d95541a82219f9cb5ad9d3bffa04c214db46df 100644 (file)
@@ -3666,6 +3666,7 @@ static inline bool mas_node_store(struct ma_state *mas, void *entry,
        // Store the new entry and range end.
        if (dst_offset < max_piv)
                dst_pivots[dst_offset] = mas->last;
+       mas->offset = dst_offset;
        rcu_assign_pointer(dst_slots[dst_offset++], entry);
 
        if (offset_end > end) // this range wrote to the end of the node.
@@ -5190,7 +5191,7 @@ void *mas_store(struct ma_state *mas, void *entry)
                return existing;
 
        if (unlikely(!mte_is_leaf(mas->node))) // spanning store occurred
-               mas->node = MAS_START;
+               mas_walk(mas);
 
        return existing;
 }
@@ -5220,7 +5221,7 @@ retry:
                return xa_err(mas->node);
 
        if (unlikely(!mte_is_leaf(mas->node))) // spanning store occurred
-               mas->node = MAS_START;
+               mas_walk(mas);
 
        return 0;
 
index baf47fcb85e306cb4ba55d559a024f610ee3b177..eed08586c8e6fe89f1c002418c4e3c5f0129c2fd 100644 (file)
@@ -353,9 +353,7 @@ static inline int vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas
 
        mas->index = vma->vm_start;
        mas->last = vma->vm_end - 1;
-       mas_lock(mas);
        ret = mas_store_gfp(mas, vma, GFP_KERNEL);
-       mas_unlock(mas);
        return ret;
 }
 
@@ -374,9 +372,7 @@ static inline int vma_mas_remove(struct vm_area_struct *vma, struct ma_state *ma
 
        mas->index = vma->vm_start;
        mas->last = vma->vm_end - 1;
-       mas_lock(mas);
        ret = mas_store_gfp(mas, NULL, GFP_KERNEL);
-       mas_unlock(mas);
        return ret;
 }
 
index 64a2addb47e2e48b31b1524a13671410ac3fa2ea..f87a0562cb1415dacd13c48af3da6f258510dcb2 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,6 +232,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                goto success;
        }
 
+       mas_lock(&mas);
        mas_set(&mas, newbrk);
        brkvma = mas_walk(&mas);
        if (brkvma) { // munmap necessary, there is something at newbrk.
@@ -285,6 +286,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 success:
        populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
+       mas_unlock(&mas);
        if (downgraded)
                mmap_read_unlock(mm);
        else
@@ -295,6 +297,7 @@ success:
        return brk;
 
 out:
+       mas_unlock(&mas);
        mmap_write_unlock(mm);
        return origbrk;
 }
@@ -332,6 +335,7 @@ static void validate_mm(struct mm_struct *mm)
        }
        if (i != mm->map_count) {
                pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
+               mt_dump(mas.tree);
                bug = 1;
        }
 
@@ -494,7 +498,9 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
 
+       mas_lock(&mas);
        vma_mas_link(mm, vma, &mas);
+       mas_unlock(&mas);
 }
 
 /*
@@ -505,12 +511,14 @@ static inline void __insert_vm_struct(struct mm_struct *mm,
                                      struct vm_area_struct *vma)
 {
        MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
+       mas_lock(&mas);
 
        BUG_ON(mas_find(&mas, vma->vm_end - 1));
        mas_reset(&mas);
 
        vma_mas_store(vma, &mas);
        mm->map_count++;
+       mas_unlock(&mas);
 }
 
 inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
@@ -615,8 +623,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        bool vma_changed = false;
        long adjust_next = 0;
        int remove_next = 0;
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
        validate_mm(mm);
+       mas_lock(&mas);
        if (next && !insert) {
                struct vm_area_struct *exporter = NULL, *importer = NULL;
 
@@ -742,28 +752,33 @@again:
        }
 
        if (start != vma->vm_start) {
-               if (vma->vm_start < start)
-                       vma_mt_szero(mm, vma->vm_start, start);
-               else
+               if (vma->vm_start < start) {
+                       mas_set_range(&mas, vma->vm_start, start - 1);
+                       mas_store_gfp(&mas, NULL, GFP_KERNEL);
+               } else
                        vma_changed = true;
                vma->vm_start = start;
        }
        if (end != vma->vm_end) {
-               if (vma->vm_end > end)
-                       vma_mt_szero(mm, end, vma->vm_end);
-               else
+               if (vma->vm_end > end) {
+                       mas_set_range(&mas, end, vma->vm_end - 1);
+                       mas_store_gfp(&mas, NULL, GFP_KERNEL);
+               } else
                        vma_changed = true;
                vma->vm_end = end;
        }
 
-       if (vma_changed)
-               vma_mt_store(mm, vma);
+       if (vma_changed) {
+               mas_reset(&mas);
+               vma_mas_store(vma, &mas);
+       }
 
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next;
                next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-               vma_mt_store(mm, next);
+               mas_reset(&mas);
+               vma_mas_store(next, &mas);
        }
 
        if (file) {
@@ -781,7 +797,9 @@ again:
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
-               __insert_vm_struct(mm, insert);
+               mas_reset(&mas);
+               vma_mas_store(insert, &mas);
+               mm->map_count++;
        }
 
        if (anon_vma) {
@@ -844,6 +862,7 @@ again:
        if (insert && file)
                uprobe_mmap(insert);
 
+       mas_unlock(&mas);
        validate_mm(mm);
        return 0;
 }
@@ -2197,6 +2216,443 @@ static void unmap_region(struct mm_struct *mm,
                      max);
        tlb_finish_mmu(&tlb, start, end);
 }
+
+/*
+ * vma_shrink() - Shrink an existing VMA and update the maple tree.
+ * Does not support inserting a new VMA while modifying the other side of
+ * the VMA.  On return, @mas points to @insert or to the newly zeroed area.
+ */
+static inline
+int vma_shrink(struct ma_state *mas, struct vm_area_struct *vma,
+              unsigned long start, unsigned long end, pgoff_t pgoff,
+              struct vm_area_struct *insert)
+#if 0
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct vm_area_struct *expand = NULL, *next = vma_next(mm, vma), *orig_vma = vma;
+       struct address_space *mapping = NULL;
+       struct rb_root_cached *root = NULL;
+       struct anon_vma *anon_vma = NULL;
+       struct file *file = vma->vm_file;
+       bool vma_changed = false;
+       long adjust_next = 0;
+       int remove_next = 0;
+
+       validate_mm(mm);
+       if (next && !insert) {
+               struct vm_area_struct *exporter = NULL, *importer = NULL;
+
+               if (end >= next->vm_end) {
+                       /*
+                        * vma expands, overlapping all the next, and
+                        * perhaps the one after too (mprotect case 6).
+                        * The only other cases that gets here are
+                        * case 1, case 7 and case 8.
+                        */
+                       if (next == expand) {
+                               /*
+                                * The only case where we don't expand "vma"
+                                * and we expand "next" instead is case 8.
+                                */
+                               VM_WARN_ON(end != next->vm_end);
+                               /*
+                                * remove_next == 3 means we're
+                                * removing "vma" and that to do so we
+                                * swapped "vma" and "next".
+                                */
+                               remove_next = 3;
+                               VM_WARN_ON(file != next->vm_file);
+                               swap(vma, next);
+                       } else {
+                               VM_WARN_ON(expand != vma);
+                               /*
+                                * case 1, 6, 7, remove_next == 2 is case 6,
+                                * remove_next == 1 is case 1 or 7.
+                                */
+                               remove_next = 1 + (end > next->vm_end);
+                               VM_WARN_ON(remove_next == 2 &&
+                                          end != vma_next(mm, next)->vm_end);
+                               /* trim end to next, for case 6 first pass */
+                               end = next->vm_end;
+                       }
+
+                       exporter = next;
+                       importer = vma;
+
+                       /*
+                        * If next doesn't have anon_vma, import from vma after
+                        * next, if the vma overlaps with it.
+                        */
+                       if (remove_next == 2 && !next->anon_vma)
+                               exporter = vma_next(mm, next);
+
+               } else if (end > next->vm_start) {
+                       /*
+                        * vma expands, overlapping part of the next:
+                        * mprotect case 5 shifting the boundary up.
+                        */
+                       adjust_next = (end - next->vm_start);
+                       exporter = next;
+                       importer = vma;
+                       VM_WARN_ON(expand != importer);
+               } else if (end < vma->vm_end) {
+                       /*
+                        * vma shrinks, and !insert tells it's not
+                        * split_vma inserting another: so it must be
+                        * mprotect case 4 shifting the boundary down.
+                        */
+                       adjust_next = -(vma->vm_end - end);
+                       exporter = vma;
+                       importer = next;
+                       VM_WARN_ON(expand != importer);
+               }
+
+               /*
+                * Easily overlooked: when mprotect shifts the boundary,
+                * make sure the expanding vma has anon_vma set if the
+                * shrinking vma had, to cover any anon pages imported.
+                */
+               if (exporter && exporter->anon_vma && !importer->anon_vma) {
+                       int error;
+
+                       importer->anon_vma = exporter->anon_vma;
+                       error = anon_vma_clone(importer, exporter);
+                       if (error)
+                               return error;
+               }
+       }
+again:
+       vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
+
+       if (file) {
+               mapping = file->f_mapping;
+               root = &mapping->i_mmap;
+               uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+               if (adjust_next)
+                       uprobe_munmap(next, next->vm_start, next->vm_end);
+
+               i_mmap_lock_write(mapping);
+               if (insert) {
+                       /*
+                        * Put into interval tree now, so instantiated pages
+                        * are visible to arm/parisc __flush_dcache_page
+                        * throughout; but we cannot insert into address
+                        * space until vma start or end is updated.
+                        */
+                       __vma_link_file(insert);
+               }
+       }
+
+       anon_vma = vma->anon_vma;
+       if (!anon_vma && adjust_next)
+               anon_vma = next->anon_vma;
+       if (anon_vma) {
+               VM_WARN_ON(adjust_next && next->anon_vma &&
+                          anon_vma != next->anon_vma);
+               anon_vma_lock_write(anon_vma);
+               anon_vma_interval_tree_pre_update_vma(vma);
+               if (adjust_next)
+                       anon_vma_interval_tree_pre_update_vma(next);
+       }
+
+       if (file) {
+               flush_dcache_mmap_lock(mapping);
+               vma_interval_tree_remove(vma, root);
+               if (adjust_next)
+                       vma_interval_tree_remove(next, root);
+       }
+
+       if (start != vma->vm_start) {
+               if (vma->vm_start < start) {
+                       mas_set_range(mas, vma->vm_start, start - 1);
+                       mas_store_gfp(mas, NULL, GFP_KERNEL);
+               } else
+                       vma_changed = true;
+               vma->vm_start = start;
+       }
+       if (end != vma->vm_end) {
+               if (vma->vm_end > end) {
+                       mas_set_range(mas, end, vma->vm_end - 1);
+                       mas_store_gfp(mas, NULL, GFP_KERNEL);
+               } else
+                       vma_changed = true;
+               vma->vm_end = end;
+       }
+
+       if (vma_changed) {
+               mas_reset(mas);
+               vma_mas_store(vma, mas);
+       }
+
+       vma->vm_pgoff = pgoff;
+       if (adjust_next) {
+               next->vm_start += adjust_next;
+               next->vm_pgoff += adjust_next >> PAGE_SHIFT;
+               mas_reset(mas);
+               vma_mas_store(next, mas);
+       }
+
+       if (file) {
+               if (adjust_next)
+                       vma_interval_tree_insert(next, root);
+               vma_interval_tree_insert(vma, root);
+               flush_dcache_mmap_unlock(mapping);
+       }
+
+       if (remove_next && file) {
+               __remove_shared_vm_struct(next, file, mapping);
+       } else if (insert) {
+               /*
+                * split_vma has split insert from vma, and needs
+                * us to insert it before dropping the locks
+                * (it may either follow vma or precede it).
+                */
+               mas_reset(mas);
+               vma_mas_store(insert, mas);
+               mm->map_count++;
+       }
+
+       if (anon_vma) {
+               anon_vma_interval_tree_post_update_vma(vma);
+               if (adjust_next)
+                       anon_vma_interval_tree_post_update_vma(next);
+               anon_vma_unlock_write(anon_vma);
+       }
+
+       if (file) {
+               i_mmap_unlock_write(mapping);
+               uprobe_mmap(vma);
+
+               if (adjust_next)
+                       uprobe_mmap(next);
+       }
+
+       if (remove_next) {
+               if (file) {
+                       uprobe_munmap(next, next->vm_start, next->vm_end);
+                       fput(file);
+               }
+               if (next->anon_vma)
+                       anon_vma_merge(vma, next);
+               mm->map_count--;
+               mpol_put(vma_policy(next));
+               vm_area_free(next);
+               /*
+                * In mprotect's case 6 (see comments on vma_merge),
+                * we must remove another next too. It would clutter
+                * up the code too much to do both in one go.
+                */
+               if (remove_next != 3) {
+                       /*
+                        * If "next" was removed and vma->vm_end was
+                        * expanded (up) over it, in turn
+                        * "next->vm_prev->vm_end" changed and the
+                        * "vma->vm_next" gap must be updated.
+                        */
+                       next = vma_next(mm, vma);
+               } else {
+                       /*
+                        * For the scope of the comment "next" and
+                        * "vma" considered pre-swap(): if "vma" was
+                        * removed, next->vm_start was expanded (down)
+                        * over it and the "next" gap must be updated.
+                        * Because of the swap() the post-swap() "vma"
+                        * actually points to pre-swap() "next"
+                        * (post-swap() "next" as opposed is now a
+                        * dangling pointer).
+                        */
+                       next = vma;
+               }
+               if (remove_next == 2) {
+                       remove_next = 1;
+                       end = next->vm_end;
+                       goto again;
+               }
+       }
+       if (insert && file)
+               uprobe_mmap(insert);
+
+       validate_mm(mm);
+       return 0;
+}
+#else
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct address_space *mapping = NULL;
+       struct rb_root_cached *root = NULL;
+       struct anon_vma *anon_vma = NULL;
+       struct file *file = vma->vm_file;
+       unsigned long old_end = vma->vm_end, old_start = vma->vm_start;
+
+       validate_mm(mm);
+       printk("%s: %px start %lu - %lu => %lu - %lu\n", __func__, vma, vma->vm_start,
+              vma->vm_end, start, end);
+
+       vma_adjust_trans_huge(vma, start, end, 0);
+       if (file) {
+               mapping = file->f_mapping;
+               root = &mapping->i_mmap;
+               uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+               i_mmap_lock_write(mapping);
+               /*
+                * Put into interval tree now, so instantiated pages are visible
+                * to arm/parisc __flush_dcache_page throughout; but we cannot
+                * insert into address space until vma start or end is updated.
+                */
+
+               if (insert) {
+                       printk("insert %lu - %lu to interval\n",
+                              insert->vm_start, insert->vm_end);
+                       __vma_link_file(insert);
+                       printk("Done\n");
+               }
+       }
+
+       anon_vma = vma->anon_vma;
+       if (anon_vma) {
+               anon_vma_lock_write(anon_vma);
+               anon_vma_interval_tree_pre_update_vma(vma);
+       }
+
+       if (file) {
+               flush_dcache_mmap_lock(mapping);
+               printk("remove %lu - %lu to interval\n",
+                              vma->vm_start, vma->vm_end);
+               vma_interval_tree_remove(vma, root);
+               printk("Done\n");
+       }
+
+       vma->vm_start = start;
+       vma->vm_end = end;
+       vma->vm_pgoff = pgoff;
+       if (!insert) {
+               /* If vm_start moved up, zero the range between the old
+                * start and the new start.
+                */
+               if (old_start != vma->vm_start) {
+                       mas->index = old_start;
+                       mas->last = vma->vm_start - 1;
+                       mas_store_gfp(mas, NULL, GFP_KERNEL);
+               }
+
+               /* If vm_end moved down, zero the range between the new
+                * end and the old end.
+                */
+               if (old_end != vma->vm_end) {
+                       mas->index = end;
+                       mas->last = old_end - 1;
+                       mas_store_gfp(mas, NULL, GFP_KERNEL);
+               }
+       }
+
+       if (file) {
+               printk("insert %lu - %lu to interval\n",
+                              vma->vm_start, vma->vm_end);
+               vma_interval_tree_insert(vma, root);
+               printk("Done\n");
+               flush_dcache_mmap_unlock(mapping);
+       }
+
+       if (insert) { // Write the new VMA into the maple tree.
+               vma_mas_store(insert, mas);
+               mm->map_count++;
+       }
+
+       if (anon_vma) {
+               anon_vma_interval_tree_post_update_vma(vma);
+               anon_vma_unlock_write(anon_vma);
+       }
+
+       if (file) {
+               i_mmap_unlock_write(mapping);
+               uprobe_mmap(vma);
+               if (insert)
+                       uprobe_mmap(insert);
+       }
+
+       printk("%s: %px end\n", __func__, vma);
+       validate_mm(mm);
+       return 0;
+}
+#endif
+
+/**
+ * mas_split_vma() - Split a VMA into two.
+ *
+ * @mm: The mm_struct
+ * @mas: The maple state; must point to the vma being split or MAS_START
+ * @vma: The vma to split
+ * @addr: The address at which to split @vma
+ * @new_below: If set, place the new vma below @addr (the first part of @vma).
+ *
+ * Note: Upon return, @mas points to the new VMA.
+ * Return: 0 on success, a negative error code on failure.
+ */
+int mas_split_vma(struct mm_struct *mm, struct ma_state *mas,
+                 struct vm_area_struct *vma, unsigned long addr, int new_below)
+{
+       struct vm_area_struct *new;
+       int err;
+
+       validate_mm(mm);
+       if (vma->vm_ops && vma->vm_ops->may_split) {
+               err = vma->vm_ops->may_split(vma, addr);
+               if (err)
+                       return err;
+       }
+
+       new = vm_area_dup(vma);
+       if (!new)
+               return -ENOMEM;
+
+       if (new_below)
+               new->vm_end = addr;
+       else {
+               new->vm_start = addr;
+               new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+       }
+
+       err = vma_dup_policy(vma, new);
+       if (err)
+               goto out_free_vma;
+
+       err = anon_vma_clone(new, vma);
+       if (err)
+               goto out_free_mpol;
+
+       if (new->vm_file)
+               get_file(new->vm_file);
+
+       if (new->vm_ops && new->vm_ops->open)
+               new->vm_ops->open(new);
+
+       if (new_below)
+               err = vma_shrink(mas, vma, addr, vma->vm_end, vma->vm_pgoff +
+                       ((addr - new->vm_start) >> PAGE_SHIFT), new);
+       else
+               err = vma_shrink(mas, vma, vma->vm_start, addr, vma->vm_pgoff,
+                                new);
+
+       validate_mm(mm);
+       /* Success. */
+       if (!err)
+               return 0;
+
+       /* Clean everything up if vma_shrink() failed. */
+       if (new->vm_ops && new->vm_ops->close)
+               new->vm_ops->close(new);
+       if (new->vm_file)
+               fput(new->vm_file);
+       unlink_anon_vmas(new);
+ out_free_mpol:
+       mpol_put(vma_policy(new));
+ out_free_vma:
+       vm_area_free(new);
+       return err;
+}
+
 /*
  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
  * has already been checked or doesn't make sense to fail.
@@ -2274,44 +2731,32 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline unsigned long detach_range(struct mm_struct *mm,
-               struct ma_state *src, struct ma_state *dst,
-               struct vm_area_struct **vma, struct vm_area_struct *prev)
+
+static inline void detach_range(struct mm_struct *mm, struct ma_state *mas,
+                       struct ma_state *dst, struct vm_area_struct **vma)
 {
+       unsigned long start = dst->index;
+       unsigned long end = dst->last;
        int count = 0;
-       struct ma_state mas;
 
-       /*
-        * unlock any mlock()ed ranges before detaching vmas, count the number
-        * of VMAs to be dropped, and return the tail entry of the affected
-        * area.
-        */
-       mas = *src;
        do {
-               BUG_ON((*vma)->vm_start < src->index);
-               BUG_ON((*vma)->vm_end > (src->last + 1));
                count++;
+               *vma = mas_prev(mas, start);
+               BUG_ON((*vma)->vm_start < start);
+               BUG_ON((*vma)->vm_end > end + 1);
                if ((*vma)->vm_flags & VM_LOCKED) {
                        mm->locked_vm -= vma_pages(*vma);
                        munlock_vma_pages_all(*vma);
                }
+
                vma_mas_store(*vma, dst);
-       } while ((*vma = mas_find(&mas, src->last)) != NULL);
+       } while ((*vma)->vm_start > start);
 
-       mas_set(&mas, src->last + 1);
        /* Drop removed area from the tree */
-       *vma = mas_find(&mas, ULONG_MAX);
-       mas_lock(src);
-       mas_store_gfp(src, NULL, GFP_KERNEL);
-       mas_unlock(src);
+       mas->last = end;
+       mas_store_gfp(mas, NULL, GFP_KERNEL);
        /* Decrement map_count */
        mm->map_count -= count;
-       /* Set the upper limit */
-       if (!(*vma))
-               return USER_PGTABLES_CEILING;
-
-       validate_mm(mm);
-       return (*vma)->vm_start;
 }
 
 /* do_mas_align_munmap() - munmap the aligned region from @start to @end.
@@ -2330,15 +2775,15 @@ static int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
               struct mm_struct *mm, unsigned long start, unsigned long end,
               struct list_head *uf, bool downgrade)
 {
-       struct vm_area_struct *prev, *last;
+       struct vm_area_struct *prev, *last, *next = NULL;
        struct maple_tree mt_detach;
-       unsigned long max;
-       MA_STATE(dst, NULL, start, start);
+       unsigned long max = USER_PGTABLES_CEILING;
+       MA_STATE(dst, NULL, start, end - 1);
        struct ma_state tmp;
        /* we have start < vma->vm_end  */
 
        validate_mm(mm);
-        /* arch_unmap() might do unmaps itself.  */
+       /* arch_unmap() might do unmaps itself.  */
        arch_unmap(mm, start, end);
 
        /*
@@ -2358,36 +2803,33 @@ static int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
                if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
                        return -ENOMEM;
 
-               error = __split_vma(mm, vma, start, 0);
+               printk("Split start %lu-%lu\n", vma->vm_start, vma->vm_end);
+               error = mas_split_vma(mm, mas, vma, start, 0);
                if (error)
                        return error;
                prev = vma;
-               // Split invalidated node, reset.
-               mas_set_range(mas, start, end - 1);
+               vma = mas_walk(mas);
        } else {
                tmp = *mas;
                prev = mas_prev(&tmp, 0);
        }
 
-       if (vma->vm_end >= end)
+       if (end < vma->vm_end) {
                last = vma;
-       else {
-               tmp = *mas;
-               mas_set(&tmp, end - 1);
-               last = mas_walk(&tmp);
+       } else {
+               mas_set(mas, end - 1);
+               last = mas_walk(mas);
        }
 
        /* Does it split the last one? */
        if (last && end < last->vm_end) {
-               int error = __split_vma(mm, last, end, 1);
+               int error;
+               printk("Split end %lu-%lu\n", vma->vm_start, vma->vm_end);
+               error = mas_split_vma(mm, mas, last, end, 1);
                if (error)
                        return error;
-               // Split invalidated node, reset.
-               mas_set_range(mas, start, end - 1);
        }
-
-       if (mas->node == MAS_START)
-               vma = mas_find(mas, end - 1);
+       next = mas_next(mas, ULONG_MAX);
 
        if (unlikely(uf)) {
                /*
@@ -2406,10 +2848,12 @@ static int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        }
 
        /* Point of no return */
+       if (next)
+               max = next->vm_start;
+
        mtree_init(&mt_detach, MAPLE_ALLOC_RANGE);
        dst.tree = &mt_detach;
-       mas->last = end - 1;
-       max = detach_range(mm, mas, &dst, &vma, prev);
+       detach_range(mm, mas, &dst, &vma);
 
        /*
         * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
@@ -2417,7 +2861,7 @@ static int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
         * down_read(mmap_lock) and collide with the VMA we are about to unmap.
         */
        if (downgrade) {
-               if (vma && (vma->vm_flags & VM_GROWSDOWN))
+               if (next && (next->vm_flags & VM_GROWSDOWN))
                        downgrade = false;
                else if (prev && (prev->vm_flags & VM_GROWSUP))
                        downgrade = false;
@@ -2426,13 +2870,10 @@ static int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
        }
 
        /* Unmap the region */
-       mas_set(&dst, start);
-       tmp = dst;
-       vma = mas_find(&dst, end - 1); // head of list.
        unmap_region(mm, vma, &dst, start, end, prev, max);
 
        /* Statistics and freeing VMAs */
-       dst = tmp;
+       mas_set(&dst, start);
        remove_mt(mm, &dst);
 
        mtree_destroy(&mt_detach);
@@ -2488,8 +2929,13 @@ int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
              struct list_head *uf)
 {
+       int ret;
        MA_STATE(mas, &mm->mm_mt, start, start);
-       return do_mas_munmap(&mas, mm, start, len, uf, false);
+
+       mas_lock(&mas);
+       ret = do_mas_munmap(&mas, mm, start, len, uf, false);
+       mas_unlock(&mas);
+       return ret;
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2509,7 +2955,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        struct ma_state ma_prev, tmp;
        MA_STATE(mas, &mm->mm_mt, addr, end - 1);
 
-       validate_mm(mm);
 
        /* Check against address space limit. */
        if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
@@ -2526,17 +2971,23 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                        return -ENOMEM;
        }
 
+       mas_lock(&mas);
+       validate_mm(mm);
        /* Unmap any existing mapping in the area */
-       if (do_mas_munmap(&mas, mm, addr, len, uf, false))
+       if (do_mas_munmap(&mas, mm, addr, len, uf, false)) {
+               mas_unlock(&mas);
                return -ENOMEM;
+       }
 
        /*
         * Private writable mapping: check memory availability
         */
        if (accountable_mapping(file, vm_flags)) {
                charged = len >> PAGE_SHIFT;
-               if (security_vm_enough_memory_mm(mm, charged))
+               if (security_vm_enough_memory_mm(mm, charged)) {
+                       mas_unlock(&mas);
                        return -ENOMEM;
+               }
                vm_flags |= VM_ACCOUNT;
        }
 
@@ -2628,8 +3079,8 @@ cannot_expand:
                 * Answer: Yes, several device drivers can do it in their
                 *         f_op->mmap method. -DaveM
                 */
-               WARN_ON_ONCE(addr != vma->vm_start);
                if (addr != vma->vm_start) {
+                       WARN_ON_ONCE(addr != vma->vm_start);
                        addr = vma->vm_start;
                        mas_set_range(&mas, addr, end - 1);
                }
@@ -2718,8 +3169,9 @@ expanded:
        vma->vm_flags |= VM_SOFTDIRTY;
 
        vma_set_page_prot(vma);
-
        validate_mm(mm);
+       mas_unlock(&mas);
+
        return addr;
 
 unmap_and_free_vma:
@@ -2740,6 +3192,7 @@ free_vma:
 unacct_error:
        if (charged)
                vm_unacct_memory(charged);
+       mas_unlock(&mas);
        return error;
 }
 
@@ -2752,8 +3205,9 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 
        if (mmap_write_lock_killable(mm))
                return -EINTR;
-
+       mas_lock(&mas);
        ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
+       mas_unlock(&mas);
        /*
         * Returning 1 indicates mmap_lock is downgraded.
         * But 1 is not legal return value of vm_munmap() and munmap(), reset
@@ -2824,6 +3278,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (!vma->vm_file)
                goto out;
 
+       mas_lock(&mas);
        if (start + size > vma->vm_end) {
                struct vm_area_struct *prev, *next;
 
@@ -2861,6 +3316,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                        prot, flags, pgoff, &populate, NULL);
        fput(file);
 out:
+       mas_unlock(&mas);
        mmap_write_unlock(mm);
        if (populate)
                mm_populate(ret, populate);
@@ -3022,12 +3478,10 @@ static int do_brk_flags(struct ma_state *mas, struct ma_state *ma_prev,
                        }
                        vma->vm_end = addr + len;
                        vma->vm_flags |= VM_SOFTDIRTY;
-                       mas_lock(ma_prev);
                        if (mas_store_gfp(ma_prev, vma, GFP_KERNEL)) {
                                mas_unlock(ma_prev);
                                goto mas_mod_fail;
                        }
-                       mas_unlock(ma_prev);
                        if (vma->anon_vma) {
                                anon_vma_interval_tree_post_update_vma(vma);
                                anon_vma_unlock_write(vma->anon_vma);
@@ -3098,9 +3552,11 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
                return -EINTR;
 
        // This vma left intentionally blank.
+       mas_lock(&mas);
        mas_walk(&mas);
        ret = do_brk_flags(&mas, &mas, &vma, addr, len, flags);
        populate = ((mm->def_flags & VM_LOCKED) != 0);
+       mas_unlock(&mas);
        mmap_write_unlock(mm);
        if (populate && !ret)
                mm_populate_vma(vma, addr, addr + len);
@@ -3150,6 +3606,7 @@ void exit_mmap(struct mm_struct *mm)
                mmap_write_unlock(mm);
        }
 
+       mas_lock(&mas);
        if (mm->locked_vm) {
                mas_for_each(&mas, vma, ULONG_MAX) {
                        if (vma->vm_flags & VM_LOCKED) {
@@ -3163,8 +3620,10 @@ void exit_mmap(struct mm_struct *mm)
        arch_exit_mmap(mm);
 
        vma = mas_find(&mas, ULONG_MAX);
-       if (!vma)       /* Can happen if dup_mmap() received an OOM */
+       if (!vma) { /* Can happen if dup_mmap() received an OOM */
+               mas_unlock(&mas);
                return;
+       }
 
        lru_add_drain();
        flush_cache_mm(mm);
@@ -3188,6 +3647,7 @@ void exit_mmap(struct mm_struct *mm)
                cond_resched();
        }
 
+       mas_unlock(&mas);
        trace_exit_mmap(mm);
        mtree_destroy(&mm->mm_mt);
        vm_unacct_memory(nr_accounted);
@@ -3595,6 +4055,7 @@ int mm_take_all_locks(struct mm_struct *mm)
        BUG_ON(mmap_read_trylock(mm));
 
        mutex_lock(&mm_all_locks_mutex);
+       mas_lock(&mas);
 
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
@@ -3622,6 +4083,7 @@ int mm_take_all_locks(struct mm_struct *mm)
                                vm_lock_anon_vma(mm, avc->anon_vma);
        }
 
+       mas_unlock(&mas);
        return 0;
 
 out_unlock:
@@ -3675,6 +4137,8 @@ void mm_drop_all_locks(struct mm_struct *mm)
        BUG_ON(mmap_read_trylock(mm));
        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
+       mas_lock(&mas);
+
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (vma->anon_vma)
                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
@@ -3684,6 +4148,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
        }
 
        mutex_unlock(&mm_all_locks_mutex);
+       mas_unlock(&mas);
 }
 
 /*