wip maple3_only
author	Liam R. Howlett <Liam.Howlett@Oracle.com>
	Wed, 22 Jul 2020 20:23:32 +0000 (16:23 -0400)
committer	Liam R. Howlett <Liam.Howlett@Oracle.com>
	Thu, 23 Jul 2020 02:38:19 +0000 (22:38 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
drivers/firmware/efi/efi.c
include/linux/mm_types.h
init/main.c
kernel/fork.c
lib/maple_tree.c
mm/init-mm.c
mm/mmap.c

diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5114cae4ec9735dad8c0d809176957ce2ba676b2..8eaf97febbc87a446ecdf2a99b91fb1df74c992e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -51,7 +51,6 @@ static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
 
 struct mm_struct efi_mm = {
-       .mm_rb                  = RB_ROOT,
        .mm_users               = ATOMIC_INIT(2),
        .mm_count               = ATOMIC_INIT(1),
        MMAP_LOCK_INITIALIZER(efi_mm)
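
With .mm_rb gone, a statically defined mm_struct carries only the maple tree. A minimal sketch of what such an initializer can look like, modeled on the init_mm change in mm/init-mm.c later in this patch (this hunk only drops the rbtree root; whether efi_mm's tree is initialized elsewhere is not shown here, and "sketch_mm" is a hypothetical name):

        /* sketch: static mm with a statically initialized maple tree */
        struct mm_struct sketch_mm = {
                .mm_mt    = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
                .mm_users = ATOMIC_INIT(2),
                .mm_count = ATOMIC_INIT(1),
        };
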
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 90021d0c053095e450355d022e5b683f910d39fa..104d45ba921d4384817f879fdb6cf43887abd58c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -386,7 +386,6 @@ struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct maple_tree mm_mt;
-               struct rb_root mm_rb;
                u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
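
A rough sketch of what replaces an rbtree descent once mm_rb is removed from the struct: lookups walk mm_mt instead, which is how this patch's find_vma() in mm/mmap.c does it ("vma_lookup_sketch" is a hypothetical helper, not part of the patch):

        static struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
                                                        unsigned long addr)
        {
                /* returns the entry containing addr, or the next one above it */
                return mt_find(&mm->mm_mt, &addr, ULONG_MAX);
        }
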
diff --git a/init/main.c b/init/main.c
index ffaa069db279e4ed2124695e9ab9b6f85cd1f578..445272bbd81116fc191f7e8fa32b7bfe8b935e51 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1331,6 +1331,7 @@ static int run_init_process(const char *init_filename)
        pr_debug("  with environment:\n");
        for (p = envp_init; *p; p++)
                pr_debug("    %s\n", *p);
+       printk("%s %d %s\n", __func__, __LINE__, init_filename);
        return do_execve(getname_kernel(init_filename),
                (const char __user *const __user *)argv_init,
                (const char __user *const __user *)envp_init);
diff --git a/kernel/fork.c b/kernel/fork.c
index 8b38eebf7e106f390e7a195469b1c9ed723d0e8b..05ed8ca91d984a4656cddfc8d865fca1c4474c2a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -485,10 +485,10 @@ EXPORT_SYMBOL(free_task);
 static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                        struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
-       struct rb_node **rb_link, *rb_parent;
+       struct vm_area_struct *tmp, *prev, **pprev, *mpnt;
        int retval;
        unsigned long charge;
+       MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
        LIST_HEAD(uf);
 
        uprobe_start_dup_mmap();
@@ -511,8 +511,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
        mm->exec_vm = oldmm->exec_vm;
        mm->stack_vm = oldmm->stack_vm;
 
-       rb_link = &mm->mm_rb.rb_node;
-       rb_parent = NULL;
        pprev = &mm->mmap;
        retval = ksm_fork(mm, oldmm);
        if (retval)
@@ -522,9 +520,16 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                goto out;
 
        prev = NULL;
-       for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+       mas_lock(&old_mas);
+       mas_for_each(&old_mas, mpnt, ULONG_MAX) {
                struct file *file;
 
+               if (xa_is_zero(mpnt))
+                       continue;
+
+               mas_pause(&old_mas);
+               mas_unlock(&old_mas);
+
                if (mpnt->vm_flags & VM_DONTCOPY) {
                        vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
                        continue;
@@ -591,7 +596,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                 */
                if (is_vm_hugetlb_page(tmp))
                        reset_vma_resv_huge_pages(tmp);
-
                /*
                 * Link in the new vma and copy the page table entries.
                 */
@@ -600,11 +604,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                tmp->vm_prev = prev;
                prev = tmp;
 
-               __vma_link_rb(mm, tmp, rb_link, rb_parent);
-               rb_link = &tmp->vm_rb.rb_right;
-               rb_parent = &tmp->vm_rb;
-
-               /* Link the vma into the MT */
+               /* Insert the vma into the new mm vma tree. */
                vma_store(mm, tmp);
 
                mm->map_count++;
@@ -616,7 +616,10 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 
                if (retval)
                        goto out;
+               mas_lock(&old_mas);
        }
+       mas_unlock(&old_mas);
+
        /* a new mm has just been created */
        retval = arch_dup_mmap(oldmm, mm);
 out:
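
The loop above follows a lock/pause/unlock pattern: the maple state stays locked across the walk, but is paused and dropped before work that can sleep, then re-taken for the next iteration. A condensed sketch of just that pattern, under the same assumptions as the hunk above ("walk_vmas_sketch" is a hypothetical helper):

        static void walk_vmas_sketch(struct mm_struct *oldmm)
        {
                struct vm_area_struct *vma;
                MA_STATE(mas, &oldmm->mm_mt, 0, 0);

                mas_lock(&mas);
                mas_for_each(&mas, vma, ULONG_MAX) {
                        if (xa_is_zero(vma))    /* gaps are stored as zero entries */
                                continue;
                        mas_pause(&mas);        /* record position; walk resumes here */
                        mas_unlock(&mas);
                        /* ... per-VMA work that may sleep or allocate ... */
                        mas_lock(&mas);
                }
                mas_unlock(&mas);
        }
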
@@ -672,7 +675,7 @@ static void check_mm(struct mm_struct *mm)
                long x = atomic_long_read(&mm->rss_stat.count[i]);
 
                if (unlikely(x))
-                       pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
+                       pr_alert("BUG: Bad rss-counter state mm:%px type:%s val:%ld\n",
                                 mm, resident_page_types[i], x);
        }
 
@@ -1021,7 +1024,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        struct user_namespace *user_ns)
 {
        mm->mmap = NULL;
-       mm->mm_rb = RB_ROOT;
        mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
        mm->vmacache_seqnum = 0;
        atomic_set(&mm->mm_users, 1);
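
mm_init() above pairs with exit_mmap() in mm/mmap.c below: the tree is created with the MAPLE_ALLOC_RANGE node format (which tracks gap sizes for the unmapped-area searches) and torn down when the mm dies. A minimal sketch of the two calls side by side ("mm_tree_lifecycle_sketch" is hypothetical):

        static void mm_tree_lifecycle_sketch(struct mm_struct *mm)
        {
                /* at mm creation, as in mm_init() above */
                mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);

                /* ... mm lifetime ... */

                /* at mm teardown, as exit_mmap() does in mm/mmap.c */
                mtree_destroy(&mm->mm_mt);
        }
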
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3c4c9d238809f1f2b7c4c47ab35852f9f18161cd..6d5be4de3bd0e440badd7e544c9440534687c6d5 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1432,7 +1432,7 @@ static inline struct maple_enode *mas_find_l_split(struct ma_state *mas)
        unsigned char slot, end = mas_data_end(mas);
        struct maple_enode *en = NULL;
 
-       for (slot = 0; i <= end; slot++) {
+       for (slot = 0; slot <= end; slot++) {
                en = mas_check_split_parent(mas, slot);
                if (en)
                        break;
@@ -3239,7 +3239,7 @@ static inline void *mas_last_entry(struct ma_state *mas,
        while (range_start < limit) {
                mas_set_slot(mas, slot);
                if (!mas_next_nentry(mas, limit, &range_start)) {
-                       *entry = mas_get_rcu_slot(mas, slot - 1);
+                       entry = mas_get_rcu_slot(mas, slot - 1);
                        if (mte_is_leaf(mas->node)) {
                                mas->index = range_start - 1;
                                mas->index = mte_get_pivot(mas->node, slot - 1);
@@ -3471,6 +3471,7 @@ static inline bool _mas_awalk(struct ma_state *mas, unsigned long size)
        switch (type) {
        default:
                slot = mas_get_slot(mas);
+               fallthrough;
        case maple_leaf_64:
                min = mas_get_safe_lower_bound(mas, slot);
                for (; slot <= pivot_cnt; slot++) {
@@ -4095,6 +4096,9 @@ void *mas_next(struct ma_state *mas, unsigned long max)
 {
        unsigned long index = 0;
 
+       if (!xa_is_node(mas->tree->ma_root))
+               return NULL;
+
        return _mas_next(mas, max, &index);
 }
 EXPORT_SYMBOL_GPL(mas_next);
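
mas_next() returns the next entry above mas.index, up to the given max; the new xa_is_node() check guards the walk against a tree whose root is empty or a single direct entry rather than a node. A hedged usage sketch ("first_entry_above" is a hypothetical helper):

        static void *first_entry_above(struct maple_tree *mt, unsigned long min)
        {
                MA_STATE(mas, mt, min, min);

                /* NULL if the root is not a node or no entry exists above min */
                return mas_next(&mas, ULONG_MAX);
        }
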
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 27229044a070211f08e930c1070d8a6cc6d26572..49d803fc4cdeaa5bf6cb34a9563b22ef9d513c6f 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/mm_types.h>
-#include <linux/rbtree.h>
 #include <linux/maple_tree.h>
 #include <linux/rwsem.h>
 #include <linux/spinlock.h>
@@ -28,7 +27,6 @@
  * and size this cpu_bitmask to NR_CPUS.
  */
 struct mm_struct init_mm = {
-       .mm_rb          = RB_ROOT,
        .mm_mt          = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
        .pgd            = swapper_pg_dir,
        .mm_users       = ATOMIC_INIT(2),
diff --git a/mm/mmap.c b/mm/mmap.c
index 67e182e995a6eb4d6d1e4d71568d3b5c3f53d825..3e5dc27035df4a3fb93a6709a0ffd2040a6ef44b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -61,7 +61,6 @@
 #include <trace/events/mm_mt.h>
 #define CONFIG_DEBUG_MAPLE_TREE
 #undef CONFIG_DEBUG_MAPLE_TREE_VERBOSE
-#define CONFIG_DEBUG_VM_RB 1
 extern void mt_validate(struct maple_tree *mt);
 
 #ifndef arch_mmap_check
@@ -128,7 +127,6 @@ static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
 }
 
 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
-// LRH: Needed
 void vma_set_page_prot(struct vm_area_struct *vma)
 {
        unsigned long vm_flags = vma->vm_flags;
@@ -146,7 +144,6 @@ void vma_set_page_prot(struct vm_area_struct *vma)
 /*
  * Requires inode->i_mapping->i_mmap_rwsem
  */
-// LRH: Needed
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
@@ -164,7 +161,6 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
  * Unlink a file-based vm structure from its interval tree, to hide
  * vma from rmap and vmtruncate before freeing its page tables.
  */
-// LRH: Needed
 void unlink_file_vma(struct vm_area_struct *vma)
 {
        struct file *file = vma->vm_file;
@@ -180,7 +176,6 @@ void unlink_file_vma(struct vm_area_struct *vma)
 /*
  * Close a vm structure and free it, returning the next.
  */
-// LRH: Needed
 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 {
        struct vm_area_struct *next = vma->vm_next;
@@ -195,7 +190,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        return next;
 }
 
-// LRH: Needed
 static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
                struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
@@ -297,314 +291,16 @@ out:
        return retval;
 }
 
-// LRH: not needed.
-static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
-{
-       unsigned long gap, prev_end;
-
-       /*
-        * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
-        * allow two stack_guard_gaps between them here, and when choosing
-        * an unmapped area; whereas when expanding we only require one.
-        * That's a little inconsistent, but keeps the code here simpler.
-        */
-       gap = vm_start_gap(vma);
-       if (vma->vm_prev) {
-               prev_end = vm_end_gap(vma->vm_prev);
-               if (gap > prev_end)
-                       gap -= prev_end;
-               else
-                       gap = 0;
-       }
-       return gap;
-}
-
-#ifdef CONFIG_DEBUG_VM_RB
-static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
-{
-       unsigned long max = vma_compute_gap(vma), subtree_gap;
-       if (vma->vm_rb.rb_left) {
-               subtree_gap = rb_entry(vma->vm_rb.rb_left,
-                               struct vm_area_struct, vm_rb)->rb_subtree_gap;
-               if (subtree_gap > max)
-                       max = subtree_gap;
-       }
-       if (vma->vm_rb.rb_right) {
-               subtree_gap = rb_entry(vma->vm_rb.rb_right,
-                               struct vm_area_struct, vm_rb)->rb_subtree_gap;
-               if (subtree_gap > max)
-                       max = subtree_gap;
-       }
-       return max;
-}
-
-static int browse_rb(struct mm_struct *mm)
-{
-       struct rb_root *root = &mm->mm_rb;
-       int i = 0, j, bug = 0;
-       struct rb_node *nd, *pn = NULL;
-       unsigned long prev = 0, pend = 0;
-
-       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
-               struct vm_area_struct *vma;
-               vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               if (vma->vm_start < prev) {
-                       pr_emerg("vm_start %lx < prev %lx\n",
-                                 vma->vm_start, prev);
-                       bug = 1;
-               }
-               if (vma->vm_start < pend) {
-                       pr_emerg("vm_start %lx < pend %lx\n",
-                                 vma->vm_start, pend);
-                       bug = 1;
-               }
-               if (vma->vm_start > vma->vm_end) {
-                       pr_emerg("vm_start %lx > vm_end %lx\n",
-                                 vma->vm_start, vma->vm_end);
-                       bug = 1;
-               }
-               spin_lock(&mm->page_table_lock);
-               if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-                       pr_emerg("free gap %lx, correct %lx\n",
-                              vma->rb_subtree_gap,
-                              vma_compute_subtree_gap(vma));
-                       bug = 1;
-               }
-               spin_unlock(&mm->page_table_lock);
-               i++;
-               pn = nd;
-               prev = vma->vm_start;
-               pend = vma->vm_end;
-       }
-       j = 0;
-       for (nd = pn; nd; nd = rb_prev(nd))
-               j++;
-       if (i != j) {
-               pr_emerg("backwards %d, forwards %d\n", j, i);
-               bug = 1;
-       }
-       return bug ? -1 : i;
-}
 #if defined(CONFIG_DEBUG_MAPLE_TREE)
-extern void mt_dump(const struct maple_tree *mt);
-
-#if 0
-static void __vma_mt_dump(struct mm_struct *mm)
-{
-       struct vm_area_struct *entry = NULL;
-
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
-
-       rcu_read_lock();
-       mas_for_each(&mas, entry, ULONG_MAX) {
-               if (xa_is_zero(entry))
-                       continue;
-
-               pr_debug("vma: %lu-%lu\t%lu-%lu\n", entry->vm_start,
-                               entry->vm_end, mas.index, mas.last);
-       }
-       rcu_read_unlock();
-}
-#endif
-
-/*
- * Validate the maple tree
- */
-static void validate_mm_mt(struct mm_struct *mm,
-               struct vm_area_struct *ignore)
-{
-       struct maple_tree *mt = &mm->mm_mt;
-       struct vm_area_struct *vma_mt, *vma = mm->mmap;
-
-       MA_STATE(mas, mt, 0, 0);
-       rcu_read_lock();
-       mas_for_each(&mas, vma_mt, ULONG_MAX) {
-               if (xa_is_zero(vma_mt))
-                       continue;
-
-               if (vma && vma == ignore)
-                       vma = vma->vm_next;
-
-               if (!vma)
-                       break;
-
-               if ((vma != vma_mt) ||
-                   (vma->vm_start != vma_mt->vm_start) ||
-                   (vma->vm_end != vma_mt->vm_end) ||
-                   (vma->vm_start != mas.index) ||
-                   (vma->vm_end -1 != mas.last)){
-                       pr_emerg("issue in %s\n", current->comm);
-                       dump_stack();
-#ifdef CONFIG_DEBUG_VM
-                       dump_vma(vma_mt);
-                       pr_emerg("and next in rb\n");
-                       dump_vma(vma->vm_next);
-#endif
-                       pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
-                                mas.index, mas.last);
-                       pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
-                                vma_mt->vm_start, vma_mt->vm_end);
-                       pr_emerg("rb vma: %px %lu - %lu\n", vma,
-                                vma->vm_start, vma->vm_end);
-                       if (ignore)
-                               pr_emerg("rb_skip %px %lu - %lu\n", ignore,
-                                       ignore->vm_start, ignore->vm_end);
-                       pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next,
-                                       vma->vm_next->vm_start, vma->vm_next->vm_end);
-
-                       mt_dump(mas.tree);
-                       if (vma_mt->vm_end != mas.last + 1) {
-                               pr_err("vma: %px vma_mt %lu-%lu\tmt %lu-%lu\n",
-                                               mm, vma_mt->vm_start, vma_mt->vm_end,
-                                               mas.index, mas.last);
-                               mt_dump(mas.tree);
-                       }
-                       VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
-                       if (vma_mt->vm_start != mas.index) {
-                               pr_err("vma: %px vma_mt %px %lu - %lu doesn't match\n",
-                                               mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
-                               mt_dump(mas.tree);
-                       }
-                       VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
-               }
-               VM_BUG_ON(vma != vma_mt);
-               vma = vma->vm_next;
-
-       }
-       VM_BUG_ON(vma);
-
-       rcu_read_unlock();
-       mt_validate(&mm->mm_mt);
-}
-#endif
-static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
-{
-       struct rb_node *nd;
-
-       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
-               struct vm_area_struct *vma;
-               vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               VM_BUG_ON_VMA(vma != ignore &&
-                       vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
-                       vma);
-       }
-}
+extern void mt_validate(struct maple_tree *mt);
 
 static void validate_mm(struct mm_struct *mm)
 {
-       int bug = 0;
-       int i = 0;
-       unsigned long highest_address = 0;
-       struct vm_area_struct *vma = mm->mmap;
-
-       while (vma) {
-               struct anon_vma *anon_vma = vma->anon_vma;
-//             struct anon_vma_chain *avc;
-
-//             pr_cont("vma: %lu-%lu", vma->vm_start, vma->vm_end);
-               if (anon_vma) {
-//                     pr_cont(" anon");
-                       anon_vma_lock_read(anon_vma);
-//                     list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-//                             anon_vma_interval_tree_verify(avc);
-                       anon_vma_unlock_read(anon_vma);
-               }
-//             pr_cont("\n");
-
-               highest_address = vm_end_gap(vma);
-               vma = vma->vm_next;
-               i++;
-       }
-       if (i != mm->map_count) {
-               pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
-               bug = 1;
-       }
-       if (highest_address != mm->highest_vm_end) {
-               pr_emerg("mm->highest_vm_end %lx, found %lx\n",
-                         mm->highest_vm_end, highest_address);
-               bug = 1;
-       }
-       i = browse_rb(mm);
-       if (i != mm->map_count) {
-               if (i != -1)
-                       pr_emerg("map_count %d rb %d\n", mm->map_count, i);
-               bug = 1;
-       }
-       VM_BUG_ON_MM(bug, mm);
+       mt_validate(&mm->mm_mt);
 }
 #else
-#define validate_mm_rb(root, ignore) do { } while (0)
-#define validate_mm_mt(root, ignore) do { } while (0)
 #define validate_mm(mm) do { } while (0)
 #endif
-
-RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
-                        struct vm_area_struct, vm_rb,
-                        unsigned long, rb_subtree_gap, vma_compute_gap)
-
-/*
- * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
- * vma->vm_prev->vm_end values changed, without modifying the vma's position
- * in the rbtree.
- */
-// LRH: Not needed
-static void vma_gap_update(struct vm_area_struct *vma)
-{
-       /*
-        * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
-        * a callback function that does exactly what we want.
-        */
-       vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
-}
-
-// LRH: Not needed
-static inline void vma_rb_insert(struct vm_area_struct *vma,
-                                struct rb_root *root)
-{
-       /* All rb_subtree_gap values must be consistent prior to insertion */
-       validate_mm_rb(root, NULL);
-
-       rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
-}
-
-// LRH:  Make sure everywhere uses an mt_erase too..
-static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
-{
-       /*
-        * Note rb_erase_augmented is a fairly large inline function,
-        * so make sure we instantiate it only once with our desired
-        * augmented rbtree callbacks.
-        */
-       rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
-}
-
-// LRH: Not needed
-static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
-                                               struct rb_root *root,
-                                               struct vm_area_struct *ignore)
-{
-       /*
-        * All rb_subtree_gap values must be consistent prior to erase,
-        * with the possible exception of the "next" vma being erased if
-        * next->vm_start was reduced.
-        */
-       validate_mm_rb(root, ignore);
-
-       __vma_rb_erase(vma, root);
-}
-
-static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
-                                        struct rb_root *root)
-{
-       /*
-        * All rb_subtree_gap values must be consistent prior to erase,
-        * with the possible exception of the vma being erased.
-        */
-       validate_mm_rb(root, vma);
-
-       __vma_rb_erase(vma, root);
-}
-
 /*
  * vma has some anon_vma assigned, and is already inserted on that
  * anon_vma's interval trees.
@@ -637,41 +333,16 @@ anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
                anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
 }
 
-static int find_vma_links(struct mm_struct *mm, unsigned long addr,
-               unsigned long end, struct vm_area_struct **pprev,
-               struct rb_node ***rb_link, struct rb_node **rb_parent)
+static bool has_overlaps(struct mm_struct *mm, unsigned long start,
+                        unsigned long end, struct vm_area_struct **pprev)
 {
-       struct rb_node **__rb_link, *__rb_parent, *rb_prev;
-
-       __rb_link = &mm->mm_rb.rb_node;
-       rb_prev = __rb_parent = NULL;
-
-       while (*__rb_link) {
-               struct vm_area_struct *vma_tmp;
-
-               __rb_parent = *__rb_link;
-               vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
-
-               if (vma_tmp->vm_end > addr) {
-                       /* Fail if an existing vma overlaps the area */
-                       if (vma_tmp->vm_start < end)
-                               return -ENOMEM;
-                       __rb_link = &__rb_parent->rb_left;
-               } else {
-                       rb_prev = __rb_parent;
-                       __rb_link = &__rb_parent->rb_right;
-               }
-       }
+       struct vm_area_struct *existing;
 
-       *pprev = NULL;
-       if (rb_prev)
-               *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
-       *rb_link = __rb_link;
-       *rb_parent = __rb_parent;
-       return 0;
+       MA_STATE(mas, &mm->mm_mt, start, start);
+       existing = mas_find(&mas, end - 1);
+       *pprev = mas_prev(&mas, 0);
+       return existing != NULL;
 }
-
-#if 0
 /* Private
  * clean_overlaps() - Call do_munmap if there exists any mapping within @start
  * to @end.  Sets @pprev to the previous entry or NULL if none exists.
@@ -681,87 +352,36 @@ static int clean_overlaps(struct mm_struct *mm, unsigned long start,
                unsigned long len, struct vm_area_struct **pprev,
                struct list_head *uf)
 {
-       struct vm_area_struct *vma;
-       MA_STATE(mas, &mm->mm_mt, start, start);
-
-       *pprev = NULL;
-
-       rcu_read_lock();
-       vma = mas_find(&mas, start + len);
-       if (vma)
-               *pprev = mas_prev(&mas, 0);
-       rcu_read_unlock();
+       if (has_overlaps(mm, start, start + len, pprev))
+               return do_munmap(mm, start, len, uf);
 
-       if (vma) {
-               if (do_munmap(mm, start, len, uf))
-                       return -ENOMEM;
-       }
        return 0;
 }
-#endif
 
 static unsigned long count_vma_pages_range(struct mm_struct *mm,
                unsigned long addr, unsigned long end)
 {
        unsigned long nr_pages = 0;
-       unsigned long nr_mt_pages = 0;
        struct vm_area_struct *vma;
+       unsigned long vm_start, vm_end;
 
        /* Find first overlapping mapping */
        vma = find_vma_intersection(mm, addr, end);
        if (!vma)
                return 0;
 
-       nr_pages = (min(end, vma->vm_end) -
-               max(addr, vma->vm_start)) >> PAGE_SHIFT;
+       vm_start = vma->vm_start;
+       vm_end = vma->vm_end;
 
-       /* Iterate over the rest of the overlaps */
-       for (vma = vma->vm_next; vma; vma = vma->vm_next) {
-               unsigned long overlap_len;
-
-               if (vma->vm_start > end)
-                       break;
+       nr_pages = (min(end, vm_end) - max(addr, vm_start)) >> PAGE_SHIFT;
 
-               overlap_len = min(end, vma->vm_end) - vma->vm_start;
-               nr_pages += overlap_len >> PAGE_SHIFT;
-       }
-
-       mt_for_each(&mm->mm_mt, vma, addr, end) {
-               nr_mt_pages +=
-                       (min(end, vma->vm_end) - vma->vm_start) >> PAGE_SHIFT;
-       }
-
-       VM_BUG_ON_MM(nr_pages != nr_mt_pages, mm);
+       /* Iterate over the rest of the overlaps */
+       addr = vm_end;
+       mt_for_each(&mm->mm_mt, vma, addr, end)
+               nr_pages += (min(end, vma->vm_end) - vma->vm_start) >> PAGE_SHIFT;
 
        return nr_pages;
 }
 
-// LRH: Not needed
-void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
-               struct rb_node **rb_link, struct rb_node *rb_parent)
-{
-       /* Update tracking information for the gap following the new vma. */
-       if (vma->vm_next)
-               vma_gap_update(vma->vm_next);
-       else
-               mm->highest_vm_end = vm_end_gap(vma);
-
-       /*
-        * vma->vm_prev wasn't known when we followed the rbtree to find the
-        * correct insertion point for that vma. As a result, we could not
-        * update the vma vm_rb parents rb_subtree_gap values on the way down.
-        * So, we first insert the vma with a zero rb_subtree_gap value
-        * (to be consistent with what we did on the way down), and then
-        * immediately update the gap to the correct value. Finally we
-        * rebalance the rbtree after all augmented values have been set.
-        */
-       rb_link_node(&vma->vm_rb, rb_parent, rb_link);
-       vma->rb_subtree_gap = 0;
-       vma_gap_update(vma);
-       vma_rb_insert(vma, &mm->mm_rb);
-}
-
-// LRH: Needed
 static void __vma_link_file(struct vm_area_struct *vma)
 {
        struct file *file;
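
has_overlaps() above folds the old find_vma_links() rbtree walk into a single tree query: mas_find() probes [start, end - 1] for any existing mapping, and mas_prev() picks up the VMA just below start as the new *pprev. A sketch of the calling convention, following the usual [start, end) VMA convention ("try_reserve_range" is a hypothetical caller):

        static int try_reserve_range(struct mm_struct *mm, unsigned long addr,
                                     unsigned long len)
        {
                struct vm_area_struct *prev;

                if (has_overlaps(mm, addr, addr + len, &prev))
                        return -ENOMEM; /* an existing mapping is in the way */

                /* success: 'prev' is the VMA just below addr, or NULL */
                return 0;
        }
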
@@ -815,20 +435,15 @@ void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        __vma_mt_store(mm, vma);
 }
-// LRH: Needed - update linked list, should fine.
-static void
-__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
-       struct vm_area_struct *prev, struct rb_node **rb_link,
-       struct rb_node *rb_parent)
+static void __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
+                      struct vm_area_struct *prev)
 {
        __vma_mt_store(mm, vma);
        __vma_link_list(mm, vma, prev);
-       __vma_link_rb(mm, vma, rb_link, rb_parent);
 }
 
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
-                       struct vm_area_struct *prev, struct rb_node **rb_link,
-                       struct rb_node *rb_parent)
+                    struct vm_area_struct *prev)
 {
        struct address_space *mapping = NULL;
 
@@ -837,7 +452,7 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                i_mmap_lock_write(mapping);
        }
 
-       __vma_link(mm, vma, prev, rb_link, rb_parent);
+       __vma_link(mm, vma, prev);
        __vma_link_file(vma);
 
        if (mapping)
@@ -855,21 +470,17 @@ extern void mt_dump(const struct maple_tree *mt);
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        struct vm_area_struct *prev;
-       struct rb_node **rb_link, *rb_parent;
 
-       if (find_vma_links(mm, vma->vm_start, vma->vm_end,
-                          &prev, &rb_link, &rb_parent))
+       if (has_overlaps(mm, vma->vm_start, vma->vm_end, &prev))
                BUG();
-       __vma_link(mm, vma, prev, rb_link, rb_parent);
+       __vma_link(mm, vma, prev);
        mm->map_count++;
 }
 
-// LRH: Fixed.
 static __always_inline void __vma_unlink_common(struct mm_struct *mm,
                                                struct vm_area_struct *vma,
                                                struct vm_area_struct *ignore)
 {
-       vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
        __vma_unlink_list(mm, vma);
        /* Kill the cache */
        vmacache_invalidate(mm);
@@ -897,7 +508,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        int remove_next = 0;
 
        validate_mm(mm);
-       validate_mm_mt(mm, NULL);
 
        if (next && !insert) {
                struct vm_area_struct *exporter = NULL, *importer = NULL;
@@ -1038,9 +648,8 @@ again:
                end_changed = true;
        }
 
-       if (end_changed || start_changed) {
+       if (end_changed || start_changed)
                __vma_mt_store(mm, vma);
-       }
 
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
@@ -1091,15 +700,8 @@ again:
                /* maple tree store is done in the __vma_link call in this
                 * call graph */
                __insert_vm_struct(mm, insert);
-       } else {
-               if (start_changed)
-                       vma_gap_update(vma);
-               if (end_changed) {
-                       if (!next)
-                               mm->highest_vm_end = vm_end_gap(vma);
-                       else if (!adjust_next)
-                               vma_gap_update(next);
-               }
+       } else if (end_changed && !next) {
+               mm->highest_vm_end = vm_end_gap(vma);
        }
 
        if (anon_vma) {
@@ -1158,10 +760,7 @@ again:
                        remove_next = 1;
                        end = next->vm_end;
                        goto again;
-               }
-               else if (next)
-                       vma_gap_update(next);
-               else {
+               } else if (!next) {
                        /*
                         * If remove_next == 2 we obviously can't
                         * reach this path.
@@ -1188,7 +787,6 @@ again:
                uprobe_mmap(insert);
 
        validate_mm(mm);
-       validate_mm_mt(mm, NULL);
 
        return 0;
 }
@@ -1348,7 +946,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        struct vm_area_struct *area, *next;
        int err;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
@@ -1427,7 +1025,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                khugepaged_enter_vma_merge(area, vm_flags);
                return area;
        }
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
 
        return NULL;
 }
@@ -1917,10 +1515,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        int error;
-       struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        /* Check against address space limit. */
        if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
                unsigned long nr_pages;
@@ -1937,13 +1534,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        }
 
        /* Clear old maps */
-       // if (clean_overlaps(mm, addr, len, &prev))
-       //      return -ENOMEM;
-       while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-                             &rb_parent)) {
-               if (do_munmap(mm, addr, len, uf))
-                       return -ENOMEM;
-       }
+       if (clean_overlaps(mm, addr, len, &prev, uf))
+               return -ENOMEM;
 
        /*
         * Private writable mapping: check memory availability
@@ -2006,8 +1598,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                 *
                 * Answer: Yes, several device drivers can do it in their
                 *         f_op->mmap method. -DaveM
-                * Bug: If addr is changed, prev, rb_link, rb_parent should
-                *      be updated for vma_link()
                 */
                WARN_ON_ONCE(addr != vma->vm_start);
 
@@ -2021,8 +1611,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                vma_set_anonymous(vma);
        }
 
-       // Drop this for maple uses later.
-       vma_link(mm, vma, prev, rb_link, rb_parent);
+       vma_link(mm, vma, prev);
        /* Once vma denies write, undo our temporary denial count */
        if (file) {
                if (vm_flags & VM_SHARED)
@@ -2058,7 +1647,7 @@ out:
 
        vma_set_page_prot(vma);
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return addr;
 
 unmap_and_free_vma:
@@ -2078,136 +1667,40 @@ free_vma:
 unacct_error:
        if (charged)
                vm_unacct_memory(charged);
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return error;
 }
 
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-       /*
-        * We implement the search by looking for an rbtree node that
-        * immediately follows a suitable gap. That is,
-        * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
-        * - gap_end   = vma->vm_start        >= info->low_limit  + length;
-        * - gap_end - gap_start >= length
-        */
+       unsigned long length, gap;
 
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long length, low_limit, high_limit, gap_start, gap_end;
-       unsigned long gap;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+       validate_mm(current->mm);
 
        /* Adjust search length to account for worst case alignment overhead */
        length = info->length + info->align_mask;
        if (length < info->length)
                return -ENOMEM;
 
-
-       // Maple tree is self contained.
        rcu_read_lock();
-       if (mas_get_unmapped_area(&mas, info->low_limit,
-                               info->high_limit - 1, length))
+       if (mas_get_unmapped_area(&mas, info->low_limit, info->high_limit - 1,
+                                 length)) {
+               rcu_read_unlock();
                return -ENOMEM;
+       }
        rcu_read_unlock();
        gap = mas.index;
        gap += (info->align_offset - gap) & info->align_mask;
-
-       /* Adjust search limits by the desired length */
-       if (info->high_limit < length)
-               return -ENOMEM;
-       high_limit = info->high_limit - length;
-
-       if (info->low_limit > high_limit)
-               return -ENOMEM;
-       low_limit = info->low_limit + length;
-
-       /* Check if rbtree root looks promising */
-       if (RB_EMPTY_ROOT(&mm->mm_rb))
-               goto check_highest;
-       vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
-       if (vma->rb_subtree_gap < length)
-               goto check_highest;
-
-       while (true) {
-               /* Visit left subtree if it looks promising */
-               gap_end = vm_start_gap(vma);
-               if (gap_end >= low_limit && vma->vm_rb.rb_left) {
-                       struct vm_area_struct *left =
-                               rb_entry(vma->vm_rb.rb_left,
-                                        struct vm_area_struct, vm_rb);
-                       if (left->rb_subtree_gap >= length) {
-                               vma = left;
-                               continue;
-                       }
-               }
-
-               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
-check_current:
-               /* Check if current node has a suitable gap */
-               if (gap_start > high_limit)
-                       return -ENOMEM;
-               if (gap_end >= low_limit &&
-                   gap_end > gap_start && gap_end - gap_start >= length)
-                       goto found;
-
-               /* Visit right subtree if it looks promising */
-               if (vma->vm_rb.rb_right) {
-                       struct vm_area_struct *right =
-                               rb_entry(vma->vm_rb.rb_right,
-                                        struct vm_area_struct, vm_rb);
-                       if (right->rb_subtree_gap >= length) {
-                               vma = right;
-                               continue;
-                       }
-               }
-
-               /* Go back up the rbtree to find next candidate node */
-               while (true) {
-                       struct rb_node *prev = &vma->vm_rb;
-                       if (!rb_parent(prev))
-                               goto check_highest;
-                       vma = rb_entry(rb_parent(prev),
-                                      struct vm_area_struct, vm_rb);
-                       if (prev == vma->vm_rb.rb_left) {
-                               gap_start = vm_end_gap(vma->vm_prev);
-                               gap_end = vm_start_gap(vma);
-                               goto check_current;
-                       }
-               }
-       }
-
-check_highest:
-       /* Check highest gap, which does not precede any rbtree node */
-       gap_start = mm->highest_vm_end;
-       gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
-       if (gap_start > high_limit)
-               return -ENOMEM;
-
-found:
-       /* We found a suitable gap. Clip it with the original low_limit. */
-       if (gap_start < info->low_limit)
-               gap_start = info->low_limit;
-
-       /* Adjust gap address to the desired alignment */
-       gap_start += (info->align_offset - gap_start) & info->align_mask;
-
-       VM_BUG_ON(gap_start + info->length > info->high_limit);
-       VM_BUG_ON(gap_start + info->length > gap_end);
-
-       VM_BUG_ON(gap != gap_start);
-       return gap_start;
+       return gap;
 }
 
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma = NULL;
-       unsigned long length, low_limit, high_limit, gap_start, gap_end;
-       unsigned long gap;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       unsigned long length, gap;
 
-       validate_mm_mt(mm, NULL);
+       MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+       validate_mm(current->mm);
        /* Adjust search length to account for worst case alignment overhead */
        length = info->length + info->align_mask;
        if (length < info->length)
@@ -2215,122 +1708,14 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
        rcu_read_lock();
        if (mas_get_unmapped_area_rev(&mas, info->low_limit, info->high_limit,
-                               length))
+                               length)) {
+               rcu_read_unlock();
                return -ENOMEM;
-
+       }
        rcu_read_unlock();
        gap = (mas.index + info->align_mask) & ~info->align_mask;
        gap -= info->align_offset & info->align_mask;
-       /*
-        * Adjust search limits by the desired length.
-        * See implementation comment at top of unmapped_area().
-        */
-       gap_end = info->high_limit;
-       if (gap_end < length)
-               return -ENOMEM;
-       high_limit = gap_end - length;
-
-       if (info->low_limit > high_limit)
-               return -ENOMEM;
-       low_limit = info->low_limit + length;
-
-       /* Check highest gap, which does not precede any rbtree node */
-       gap_start = mm->highest_vm_end;
-       if (gap_start <= high_limit)
-               goto found_highest;
-
-       /* Check if rbtree root looks promising */
-       if (RB_EMPTY_ROOT(&mm->mm_rb))
-               return -ENOMEM;
-       vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
-       if (vma->rb_subtree_gap < length)
-               return -ENOMEM;
-
-       while (true) {
-               /* Visit right subtree if it looks promising */
-               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
-               if (gap_start <= high_limit && vma->vm_rb.rb_right) {
-                       struct vm_area_struct *right =
-                               rb_entry(vma->vm_rb.rb_right,
-                                        struct vm_area_struct, vm_rb);
-                       if (right->rb_subtree_gap >= length) {
-                               vma = right;
-                               continue;
-                       }
-               }
-
-check_current:
-               /* Check if current node has a suitable gap */
-               gap_end = vm_start_gap(vma);
-               if (gap_end < low_limit)
-                       return -ENOMEM;
-               if (gap_start <= high_limit &&
-                   gap_end > gap_start && gap_end - gap_start >= length)
-                       goto found;
-
-               /* Visit left subtree if it looks promising */
-               if (vma->vm_rb.rb_left) {
-                       struct vm_area_struct *left =
-                               rb_entry(vma->vm_rb.rb_left,
-                                        struct vm_area_struct, vm_rb);
-                       if (left->rb_subtree_gap >= length) {
-                               vma = left;
-                               continue;
-                       }
-               }
-
-               /* Go back up the rbtree to find next candidate node */
-               while (true) {
-                       struct rb_node *prev = &vma->vm_rb;
-                       if (!rb_parent(prev))
-                               return -ENOMEM;
-                       vma = rb_entry(rb_parent(prev),
-                                      struct vm_area_struct, vm_rb);
-                       if (prev == vma->vm_rb.rb_right) {
-                               gap_start = vma->vm_prev ?
-                                       vm_end_gap(vma->vm_prev) : 0;
-                               goto check_current;
-                       }
-               }
-       }
-
-found:
-       /* We found a suitable gap. Clip it with the original high_limit. */
-       if (gap_end > info->high_limit)
-               gap_end = info->high_limit;
-
-found_highest:
-       /* Compute highest gap address at the desired alignment */
-       gap_end -= info->length;
-       gap_end -= (gap_end - info->align_offset) & info->align_mask;
-
-       VM_BUG_ON(gap_end < info->low_limit);
-       VM_BUG_ON(gap_end < gap_start);
-
-       if (gap != gap_end) {
-               struct vm_area_struct *rb_find_vma(struct mm_struct *mm, unsigned long addr);
-               pr_err("%s: %px Gap was found: mt %lu gap_end %lu\n", __func__,
-                               mm, gap, gap_end);
-               pr_err("window was %lu - %lu size %lu\n", info->high_limit,
-                               info->low_limit, length);
-               pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max,
-                               mas.last);
-               pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index,
-                               info->align_mask, info->align_offset);
-               pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index,
-                               rb_find_vma(mm, mas.index), vma);
-               mt_dump(&mm->mm_mt);
-               {
-                       struct vm_area_struct *dv = mm->mmap;
-                       while (dv) {
-                               printk("vma %px %lu-%lu\n", dv, dv->vm_start, dv->vm_end);
-                               dv = dv->vm_next;
-                       }
-               }
-               VM_BUG_ON(gap != gap_end);
-       }
-
-       return gap_end;
+       return gap;
 }
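
Both searches now delegate the gap hunt to the tree: under rcu, mas_get_unmapped_area() (or its _rev twin for top-down) fills mas.index with the address of a gap of at least 'length' bytes inside the limits, leaving only the alignment arithmetic to the caller. A bottom-up sketch under those assumptions ("find_gap_sketch" is hypothetical):

        static unsigned long find_gap_sketch(struct mm_struct *mm,
                                             unsigned long low, unsigned long high,
                                             unsigned long length)
        {
                MA_STATE(mas, &mm->mm_mt, 0, 0);

                rcu_read_lock();
                if (mas_get_unmapped_area(&mas, low, high - 1, length)) {
                        rcu_read_unlock();
                        return -ENOMEM;
                }
                rcu_read_unlock();

                return mas.index;       /* lowest address of a large-enough gap */
        }
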
 
 /*
@@ -2502,6 +1887,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 
        if (addr > TASK_SIZE - len)
                return -ENOMEM;
+
        if (offset_in_page(addr))
                return -EINVAL;
 
@@ -2512,7 +1898,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 EXPORT_SYMBOL(get_unmapped_area);
 
 /**
- * mt_find_vma() - Find the VMA for a given address, or the next vma.  May return
+ * find_vma() - Find the VMA for a given address, or the next vma.  May return
  * NULL in the case of no vma at addr or above
  * @mm: The mm_struct to check
  * @addr: The address
@@ -2520,7 +1906,7 @@ EXPORT_SYMBOL(get_unmapped_area);
  * Returns: The VMA associated with addr, or the next vma.
  * May return NULL in the case of no vma at addr or above.
  */
-struct vm_area_struct *mt_find_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma;
 
@@ -2530,60 +1916,15 @@ struct vm_area_struct *mt_find_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
 
        vma = mt_find(&mm->mm_mt, &addr, ULONG_MAX);
-
        if (vma)
                vmacache_update(addr, vma);
-       return vma;
-}
-
-/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct *rb_find_vma(struct mm_struct *mm, unsigned long addr)
-{
-       struct rb_node *rb_node;
-       struct vm_area_struct *vma;
 
-       /* Check the cache first. */
-       vma = vmacache_find(mm, addr);
-       if (likely(vma))
-               return vma;
-
-       rb_node = mm->mm_rb.rb_node;
-
-       while (rb_node) {
-               struct vm_area_struct *tmp;
-
-               tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-               if (tmp->vm_end > addr) {
-                       vma = tmp;
-                       if (tmp->vm_start <= addr)
-                               break;
-                       rb_node = rb_node->rb_left;
-               } else
-                       rb_node = rb_node->rb_right;
-       }
-
-       if (vma)
-               vmacache_update(addr, vma);
        return vma;
 }
-
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
-{
-       struct vm_area_struct *ret = rb_find_vma(mm, addr);
-       struct vm_area_struct *mt_ret = mt_find_vma(mm, addr);
-       if (ret != mt_ret) {
-               pr_err("Looking for %lu\n", addr);
-               mt_dump(&mm->mm_mt);
-               pr_err("%px %lu: ret %px mt_ret %px\n", mm, addr, ret, mt_ret);
-       }
-       VM_BUG_ON_VMA((unsigned long)ret != (unsigned long)mt_ret , ret);
-       return ret;
-}
 EXPORT_SYMBOL(find_vma);
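
find_vma() keeps its historical contract: it may return the VMA *after* addr when addr sits in a gap, so callers that need an exact hit must re-check vm_start. A small sketch of that re-check ("exact_lookup_sketch" is hypothetical, not part of this patch):

        static struct vm_area_struct *exact_lookup_sketch(struct mm_struct *mm,
                                                          unsigned long addr)
        {
                struct vm_area_struct *vma = find_vma(mm, addr);

                if (vma && vma->vm_start > addr)
                        return NULL;    /* addr falls in the gap below vma */
                return vma;
        }
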
 
 /**
- * mt_find_vma_prev() - Find the VMA for a given address, or the next vma and
+ * find_vma_prev() - Find the VMA for a given address, or the next vma and
  * sets %pprev to the previous VMA, if any.
  * @mm: The mm_struct to check
  * @addr: The address
@@ -2593,7 +1934,7 @@ EXPORT_SYMBOL(find_vma);
  * May return NULL in the case of no vma at addr or above.
  */
 struct vm_area_struct *
-mt_find_vma_prev(struct mm_struct *mm, unsigned long addr,
+find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
        struct vm_area_struct *vma;
@@ -2606,39 +1947,6 @@ mt_find_vma_prev(struct mm_struct *mm, unsigned long addr,
        return vma;
 }
 
-/*
- * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- */
-struct vm_area_struct *
-rb_find_vma_prev(struct mm_struct *mm, unsigned long addr,
-                       struct vm_area_struct **pprev)
-{
-       struct vm_area_struct *vma;
-
-       vma = find_vma(mm, addr);
-       if (vma) {
-               *pprev = vma->vm_prev;
-       } else {
-               struct rb_node *rb_node = rb_last(&mm->mm_rb);
-
-               *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
-       }
-       return vma;
-}
-
-struct vm_area_struct *
-find_vma_prev(struct mm_struct *mm, unsigned long addr,
-                       struct vm_area_struct **pprev)
-{
-       struct vm_area_struct *mt_prev;
-       struct vm_area_struct *ret = rb_find_vma_prev(mm, addr, pprev);
-       VM_BUG_ON_VMA((unsigned long)ret !=
-                       (unsigned long)mt_find_vma_prev(mm, addr, &mt_prev),
-                       ret);
-       VM_BUG_ON_VMA(mt_prev != *pprev, *pprev);
-       return ret;
-}
-
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -2697,7 +2005,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        unsigned long gap_addr;
        int error = 0;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
@@ -2705,6 +2013,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        address &= PAGE_MASK;
        if (address >= (TASK_SIZE & PAGE_MASK))
                return -ENOMEM;
+
        address += PAGE_SIZE;
 
        /* Enforce stack_guard_gap */
@@ -2744,15 +2053,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        error = acct_stack_growth(vma, size, grow);
                        if (!error) {
                                /*
-                                * vma_gap_update() doesn't support concurrent
-                                * updates, but we only hold a shared mmap_lock
-                                * lock here, so we need to protect against
-                                * concurrent vma expansions.
-                                * anon_vma_lock_write() doesn't help here, as
-                                * we don't guarantee that all growable vmas
-                                * in a mm share the same root anon vma.
-                                * So, we reuse mm->page_table_lock to guard
-                                * against concurrent vma expansions.
+                                * We only hold a shared mmap_lock lock here, so
+                                * we need to protect against concurrent vma
+                                * expansions.  anon_vma_lock_write() doesn't
+                                * help here, as we don't guarantee that all
+                                * growable vmas in a mm share the same root
+                                * anon vma.  So, we reuse mm->page_table_lock
+                                * to guard against concurrent vma expansions.
                                 */
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
@@ -2760,10 +2067,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                vm_stat_account(mm, vma->vm_flags, grow);
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
+                               __vma_mt_store(mm, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
-                               if (vma->vm_next)
-                                       vma_gap_update(vma->vm_next);
-                               else
+                               if (!vma->vm_next)
                                        mm->highest_vm_end = vm_end_gap(vma);
                                spin_unlock(&mm->page_table_lock);
 
@@ -2774,7 +2080,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(mm);
-       validate_mm_mt(mm, NULL);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
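
The stack-growth paths show the store-on-resize rule: whenever vm_start or vm_end moves, the VMA's span in the tree must be rewritten, here under mm->page_table_lock to serialize concurrent expansions. A condensed sketch of the upward-growth case ("grow_vma_sketch" is hypothetical):

        static void grow_vma_sketch(struct mm_struct *mm,
                                    struct vm_area_struct *vma,
                                    unsigned long new_end)
        {
                spin_lock(&mm->page_table_lock);
                vma->vm_end = new_end;
                __vma_mt_store(mm, vma);        /* overwrite the old span in the tree */
                spin_unlock(&mm->page_table_lock);
        }
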
@@ -2826,15 +2131,13 @@ int expand_downwards(struct vm_area_struct *vma,
                        error = acct_stack_growth(vma, size, grow);
                        if (!error) {
                                /*
-                                * vma_gap_update() doesn't support concurrent
-                                * updates, but we only hold a shared mmap_lock
-                                * lock here, so we need to protect against
-                                * concurrent vma expansions.
-                                * anon_vma_lock_write() doesn't help here, as
-                                * we don't guarantee that all growable vmas
-                                * in a mm share the same root anon vma.
-                                * So, we reuse mm->page_table_lock to guard
-                                * against concurrent vma expansions.
+                                * We only hold a shared mmap_lock lock here, so
+                                * we need to protect against concurrent vma
+                                * expansions.  anon_vma_lock_write() doesn't
+                                * help here, as we don't guarantee that all
+                                * growable vmas in a mm share the same root
+                                * anon vma.  So, we reuse mm->page_table_lock
+                                * to guard against concurrent vma expansions.
                                 */
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
@@ -2846,7 +2149,6 @@ int expand_downwards(struct vm_area_struct *vma,
                                // Overwrite old entry in mtree.
                                __vma_mt_store(mm, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
-                               vma_gap_update(vma);
                                spin_unlock(&mm->page_table_lock);
 
                                perf_event_mmap(vma);
@@ -2952,7 +2254,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
                vma = remove_vma(vma);
        } while (vma);
        vm_unacct_memory(nr_accounted);
-       //validate_mm(mm);
 }
 
 /*
@@ -2990,17 +2291,15 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        vma->vm_prev = NULL;
        do {
-               vma_rb_erase(vma, &mm->mm_rb);
                __vma_mt_erase(mm, vma);
                mm->map_count--;
                tail_vma = vma;
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
-       if (vma) {
+       if (vma)
                vma->vm_prev = prev;
-               vma_gap_update(vma);
-       } else
+       else
                mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
        tail_vma->vm_next = NULL;
 
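Unmapping is the mirror image of the store: each VMA leaving the mm is erased from the tree as it is unlinked from the list. A sketch of the per-VMA step inside detach_vmas_to_be_unmapped() above ("detach_one_sketch" is hypothetical):

        static void detach_one_sketch(struct mm_struct *mm,
                                      struct vm_area_struct *vma)
        {
                __vma_mt_erase(mm, vma);        /* drop [vm_start, vm_end) from the tree */
                mm->map_count--;
        }
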
@@ -3017,7 +2316,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct vm_area_struct *new;
        int err;
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
 
        if (vma->vm_ops && vma->vm_ops->split) {
                err = vma->vm_ops->split(vma, addr);
@@ -3070,7 +2369,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        mpol_put(vma_policy(new));
  out_free_vma:
        vm_area_free(new);
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return err;
 }
 
@@ -3108,7 +2407,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 
        /*
         * arch_unmap() might do unmaps itself.  It must be called
-        * and finish any rbtree manipulation before this code
-        * runs and also starts to manipulate the rbtree.
+        * and finish any tree manipulation before this code
+        * runs and also starts to manipulate the maple tree.
         */
        arch_unmap(mm, start, end);
@@ -3118,7 +2417,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        if (!vma)
                return 0;
        prev = vma->vm_prev;
-       /* we have  start < vma->vm_end  */
+       /* we have start < vma->vm_end  */
 
        /* if it doesn't overlap, we have nothing.. */
        if (vma->vm_start >= end)
@@ -3187,7 +2486,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
                }
        }
 
-       /* Detach vmas from rbtree */
+       /* Detach VMAs from the tree and the VMA linked list. */
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
 
        if (downgrade)
@@ -3350,15 +2649,15 @@ out:
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len,
+                       unsigned long flags, struct list_head *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
        unsigned long mapped_addr;
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
 
        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
@@ -3376,13 +2675,8 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        /*
         * Clear old maps.  this also does some error checking for us
         */
-       //if (clean_overlaps(mm, addr, len, &prev))
-       //  return -ENOMEM;
-       while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-                             &rb_parent)) {
-               if (do_munmap(mm, addr, len, uf))
-                       return -ENOMEM;
-       }
+       if (clean_overlaps(mm, addr, len, &prev, uf))
+               return -ENOMEM;
 
        /* Check against address space limits *after* clearing old maps... */
        if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
@@ -3415,7 +2709,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
        vma->vm_page_prot = vm_get_page_prot(flags);
-       vma_link(mm, vma, prev, rb_link, rb_parent);
+       vma_link(mm, vma, prev);
 out:
        perf_event_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
@@ -3423,7 +2717,7 @@ out:
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
        vma->vm_flags |= VM_SOFTDIRTY;
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return 0;
 }
 
@@ -3438,6 +2732,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
        len = PAGE_ALIGN(request);
        if (len < request)
                return -ENOMEM;
+
        if (!len)
                return 0;
 
@@ -3528,12 +2823,11 @@ void exit_mmap(struct mm_struct *mm)
                vma = remove_vma(vma);
        }
 
-       mtree_destroy(&mm->mm_mt);
        trace_exit_mmap(mm);
 #if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
        printk("mt_mod %px, (%px), DESTROY", mm, &mm->mm_mt);
 #endif
-
+       mtree_destroy(&mm->mm_mt);
        vm_unacct_memory(nr_accounted);
 }
 
@@ -3544,21 +2838,10 @@ void exit_mmap(struct mm_struct *mm)
 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        struct vm_area_struct *prev;
-       struct rb_node **rb_link, *rb_parent;
-       unsigned long start = vma->vm_start;
-       struct vm_area_struct *overlap = NULL;
 
-       if (find_vma_links(mm, vma->vm_start, vma->vm_end,
-                          &prev, &rb_link, &rb_parent))
-               return -ENOMEM;
 
-       if ((overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1)) != NULL) {
-               pr_err("Found vma ending at %lu\n", start - 1);
-               pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap,
-                               overlap->vm_start, overlap->vm_end - 1);
-               mt_dump(&mm->mm_mt);
-               BUG();
-       }
+       if (has_overlaps(mm, vma->vm_start, vma->vm_end, &prev))
+               return -ENOMEM;
 
        if ((vma->vm_flags & VM_ACCOUNT) &&
             security_vm_enough_memory_mm(mm, vma_pages(vma)))
@@ -3581,7 +2864,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
                vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
        }
 
-       vma_link(mm, vma, prev, rb_link, rb_parent);
+       vma_link(mm, vma, prev);
        return 0;
 }
 
@@ -3597,11 +2880,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        unsigned long vma_start = vma->vm_start;
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma, *prev;
-       struct rb_node **rb_link, *rb_parent;
        bool faulted_in_anon_vma = true;
-       unsigned long index = addr;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        /*
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
@@ -3611,10 +2892,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                faulted_in_anon_vma = false;
        }
 
-       if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
+       if (has_overlaps(mm, addr, addr + len, &prev))
                return NULL;    /* should never get here */
-       if (mt_find(&mm->mm_mt, &index, addr+len - 1))
-               BUG();
+
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                            vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
                            vma->vm_userfaultfd_ctx);
@@ -3655,10 +2935,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        get_file(new_vma->vm_file);
                if (new_vma->vm_ops && new_vma->vm_ops->open)
                        new_vma->vm_ops->open(new_vma);
-               vma_link(mm, new_vma, prev, rb_link, rb_parent);
+               vma_link(mm, new_vma, prev);
                *need_rmap_locks = false;
        }
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return new_vma;
 
 out_free_mempol:
@@ -3666,7 +2946,7 @@ out_free_mempol:
 out_free_vma:
        vm_area_free(new_vma);
 out:
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return NULL;
 }
 
@@ -3791,7 +3071,7 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
@@ -3813,12 +3093,12 @@ static struct vm_area_struct *__install_special_mapping(
 
        perf_event_mmap(vma);
 
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return vma;
 
 out:
        vm_area_free(vma);
-       validate_mm_mt(mm, NULL);
+       validate_mm(mm);
        return ERR_PTR(ret);
 }