mm: Start tracking VMAs with maple tree
author	Liam R. Howlett <Liam.Howlett@Oracle.com>
	Fri, 24 Jul 2020 17:04:30 +0000 (13:04 -0400)
committer	Liam R. Howlett <Liam.Howlett@Oracle.com>
	Fri, 30 Oct 2020 19:07:20 +0000 (15:07 -0400)
Start tracking the VMAs with the new maple tree structure in parallel
with the rb_tree.  Add debug and trace events for maple tree operations
and duplicate the rb_tree that is created on fork into the maple tree.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
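
For readers new to the API exercised below: the maple tree maps ranges of
unsigned long indices to pointers, and this patch stores each VMA over the
closed range [vm_start, vm_end - 1].  A minimal sketch of that usage; the
demo_track() helper is hypothetical, but it uses only calls and initializers
that appear in this patch:

	#include <linux/maple_tree.h>

	static struct maple_tree demo_mt = MTREE_INIT(demo_mt, MAPLE_ALLOC_RANGE);

	static int demo_track(void *obj, unsigned long start, unsigned long end)
	{
		unsigned long index = start;
		int ret;

		/* Store obj over the closed range [start, end - 1]. */
		ret = mtree_store_range(&demo_mt, start, end - 1, obj, GFP_KERNEL);
		if (ret)
			return ret;

		/* Look up the entry covering 'index'; mt_find() advances index. */
		if (mt_find(&demo_mt, &index, end - 1) != obj)
			return -EINVAL;

		/* Remove the entry containing 'start'. */
		mtree_erase(&demo_mt, start);
		return 0;
	}
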
drivers/firmware/efi/efi.c
include/trace/events/mm_mt.h
mm/mmap.c

index 5e5480a0a32d7dc91cd56acd5b0af9d6a0c59e26..24418ef1f2795a17e658596e45134d27ab26fc03 100644 (file)
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -55,6 +55,7 @@ static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
 
 struct mm_struct efi_mm = {
        .mm_rb                  = RB_ROOT,
+       .mm_mt                  = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
        .mm_users               = ATOMIC_INIT(2),
        .mm_count               = ATOMIC_INIT(1),
        MMAP_LOCK_INITIALIZER(efi_mm)
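
MTREE_INIT(name, flags) above is the compile-time initializer; dynamically
created mm_structs would initialize their tree at runtime instead.  A sketch
of the assumed runtime counterpart (mt_init_flags() is the maple tree API's
runtime initializer and is not part of this commit):

	/* Assumed runtime equivalent of the static MTREE_INIT() above. */
	static void demo_mm_tree_init(struct mm_struct *mm)
	{
		mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
	}
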
index 88cd76573345de6f7e853a85493ad54e661551f2..75b5e6ce660d52eec83e61624bf8a69ebbf03a30 100644 (file)
--- a/include/trace/events/mm_mt.h
+++ b/include/trace/events/mm_mt.h
@@ -12,23 +12,23 @@ struct mm_struct;
 struct vm_area_struct;
 
 /**
- * __vma_mt_erase - called to erase an entry from the mm_mt
+ * vma_mt_erase - called to erase an entry from the mm_mt
  *
  * @mm:                the mm_struct which contains the mt
  * @vma:       the vma that is to be erased.
  *
  */
-TRACE_EVENT(__vma_mt_erase,
+TRACE_EVENT(vma_mt_erase,
 
        TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),
 
        TP_ARGS(mm, vma),
 
-       TP_STRUCT__entry (
-                       __field( struct mm_struct *, mm)
-                       __field( struct vm_area_struct *, vma)
-                       __field( unsigned long, vm_start)
-                       __field( unsigned long, vm_end)
+       TP_STRUCT__entry(
+                       __field(struct mm_struct *, mm)
+                       __field(struct vm_area_struct *, vma)
+                       __field(unsigned long, vm_start)
+                       __field(unsigned long, vm_end)
        ),
 
        TP_fast_assign(
@@ -46,19 +46,19 @@ TRACE_EVENT(__vma_mt_erase,
 );
 
 /**
- * __vma_mt_szero - Called to set a range to NULL in the mm_mt
+ * vma_mt_szero - Called to set a range to NULL in the mm_mt
  *
  */
-TRACE_EVENT(__vma_mt_szero,
+TRACE_EVENT(vma_mt_szero,
        TP_PROTO(struct mm_struct *mm, unsigned long start,
                 unsigned long end),
 
        TP_ARGS(mm, start, end),
 
        TP_STRUCT__entry(
-                       __field( struct mm_struct*, mm)
-                       __field( unsigned long, start)
-                       __field( unsigned long, end)
+                       __field(struct mm_struct *, mm)
+                       __field(unsigned long, start)
+                       __field(unsigned long, end)
        ),
 
        TP_fast_assign(
@@ -74,16 +74,16 @@ TRACE_EVENT(__vma_mt_szero,
        )
 );
 
-TRACE_EVENT(__vma_mt_store,
+TRACE_EVENT(vma_mt_store,
        TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),
 
        TP_ARGS(mm, vma),
 
        TP_STRUCT__entry(
-                       __field( struct mm_struct*, mm)
-                       __field( struct vm_area_struct*, vma)
-                       __field( unsigned long, vm_start)
-                       __field( unsigned long, vm_end)
+                       __field(struct mm_struct *, mm)
+                       __field(struct vm_area_struct *, vma)
+                       __field(unsigned long, vm_start)
+                       __field(unsigned long, vm_end)
        ),
 
        TP_fast_assign(
@@ -96,7 +96,7 @@ TRACE_EVENT(__vma_mt_store,
        TP_printk("mt_mod %p, (%p), STORE, %lu, %lu",
                  __entry->mm, __entry->vma,
                  (unsigned long) __entry->vm_start,
-                 (unsigned long) __entry->vm_end - 1
+                 (unsigned long) __entry->vm_end
        )
 );
 
@@ -107,7 +107,7 @@ TRACE_EVENT(exit_mmap,
        TP_ARGS(mm),
 
        TP_STRUCT__entry(
-                       __field( struct mm_struct*, mm)
+                       __field(struct mm_struct *, mm)
        ),
 
        TP_fast_assign(
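
The edits above are mostly a rename (dropping the __ prefix to match the new
helper names in mm/mmap.c) plus whitespace fixes inside __field().  As a
reference for the pattern, a TRACE_EVENT() has three parts: the record
layout, the assignment at the trace site, and the output format.  A minimal
sketch with a hypothetical event name, using the same fields as the events
above:

	TRACE_EVENT(vma_mt_demo,
		TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),
		TP_ARGS(mm, vma),

		/* Layout of the record written to the ring buffer. */
		TP_STRUCT__entry(
				__field(struct mm_struct *, mm)
				__field(unsigned long, vm_start)
				__field(unsigned long, vm_end)
		),

		/* Runs at the trace site to fill in the record. */
		TP_fast_assign(
				__entry->mm = mm;
				__entry->vm_start = vma->vm_start;
				__entry->vm_end = vma->vm_end;
		),

		/* Rendered when the event is read back from tracefs. */
		TP_printk("mt_mod %p, DEMO, %lu, %lu",
			  __entry->mm, __entry->vm_start, __entry->vm_end)
	);
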
index 12a134e0d73835a8c808067d9b8add731ac986de..004703cc4e14dcac070c577445f9115325ee9d26 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -59,9 +59,6 @@
 #include "internal.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/mm_mt.h>
-#define CONFIG_DEBUG_MAPLE_TREE
-#undef CONFIG_DEBUG_MAPLE_TREE_VERBOSE
-#define CONFIG_DEBUG_VM_RB 1
 extern void mt_validate(struct maple_tree *mt);
 
 #ifndef arch_mmap_check
@@ -388,30 +385,8 @@ static int browse_rb(struct mm_struct *mm)
 #if defined(CONFIG_DEBUG_MAPLE_TREE)
 extern void mt_dump(const struct maple_tree *mt);
 
-#if 0
-static void __vma_mt_dump(struct mm_struct *mm)
-{
-       struct vm_area_struct *entry = NULL;
-
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
-
-       rcu_read_lock();
-       mas_for_each(&mas, entry, ULONG_MAX) {
-               if (xa_is_zero(entry))
-                       continue;
-
-               pr_debug("vma: %lu-%lu\t%lu-%lu\n", entry->vm_start,
-                               entry->vm_end, mas.index, mas.last);
-       }
-       rcu_read_unlock();
-}
-#endif
-
-/*
- * Validate the maple tree
- */
-static void validate_mm_mt(struct mm_struct *mm,
-               struct vm_area_struct *ignore)
+/* Validate the maple tree */
+static void validate_mm_mt(struct mm_struct *mm)
 {
        struct maple_tree *mt = &mm->mm_mt;
        struct vm_area_struct *vma_mt, *vma = mm->mmap;
@@ -422,9 +397,6 @@ static void validate_mm_mt(struct mm_struct *mm,
                if (xa_is_zero(vma_mt))
                        continue;
 
-               if (vma && vma == ignore)
-                       vma = vma->vm_next;
-
                if (!vma)
                        break;
 
@@ -432,21 +404,20 @@ static void validate_mm_mt(struct mm_struct *mm,
                    (vma->vm_start != vma_mt->vm_start) ||
                    (vma->vm_end != vma_mt->vm_end) ||
                    (vma->vm_start != mas.index) ||
-                   (vma->vm_end -1 != mas.last)){
+                   (vma->vm_end - 1 != mas.last)) {
                        pr_emerg("issue in %s\n", current->comm);
                        dump_stack();
+#ifdef CONFIG_DEBUG_VM
                        dump_vma(vma_mt);
                        pr_emerg("and next in rb\n");
                        dump_vma(vma->vm_next);
+#endif
                        pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
                                 mas.index, mas.last);
                        pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
                                 vma_mt->vm_start, vma_mt->vm_end);
                        pr_emerg("rb vma: %px %lu - %lu\n", vma,
                                 vma->vm_start, vma->vm_end);
-                       if (ignore)
-                               pr_emerg("rb_skip %px %lu - %lu\n", ignore,
-                                       ignore->vm_start, ignore->vm_end);
                        pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next,
                                        vma->vm_next->vm_start, vma->vm_next->vm_end);
 
@@ -532,7 +503,7 @@ static void validate_mm(struct mm_struct *mm)
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
-#define validate_mm_mt(root, ignore) do { } while (0)
+#define validate_mm_mt(root) do { } while (0)
 #define validate_mm(mm) do { } while (0)
 #endif
 
@@ -821,48 +792,59 @@ static void __vma_link_file(struct vm_area_struct *vma)
                flush_dcache_mmap_unlock(mapping);
        }
 }
-static void __vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
+/* Private
+ * vma_mt_erase() - Erase a VMA entry from the maple tree.
+ *
+ * @mm: The mm_struct
+ * @vma: The vm_area_struct to erase from the maple tree.
+ */
+static inline void vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       trace___vma_mt_erase(mm, vma);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
-       printk("mt_mod %px, (%px), ERASE, %lu, %lu,", mm, vma, vma->vm_start,
-              vma->vm_end - 1);
-#endif
+       trace_vma_mt_erase(mm, vma);
        mtree_erase(&mm->mm_mt, vma->vm_start);
-       mt_validate(&mm->mm_mt);
 }
-static void __vma_mt_szero(struct mm_struct *mm, unsigned long start,
+/* Private
+ * vma_mt_szero() - Set a given range to NULL in the maple tree.  Used when
+ * modifying a vm_area_struct start or end.
+ *
+ * @mm: The mm_struct
+ * @start: The start address to zero
+ * @end: The end address to zero (exclusive).
+ */
+static inline void vma_mt_szero(struct mm_struct *mm, unsigned long start,
                unsigned long end)
 {
-       trace___vma_mt_szero(mm, start, end);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
-       printk("mt_mod %px, (%px), SNULL, %lu, %lu,", mm, NULL, start,
-              end - 1);
-#endif
+       trace_vma_mt_szero(mm, start, end);
        mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL);
 }
-static void __vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
+/* Private
+ * vma_mt_store() - Store a given vm_area_struct in the maple tree.
+ *
+ * @mm: The mm_struct
+ * @vma: The vm_area_struct to store in the maple tree.
+ */
+static inline void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       trace___vma_mt_store(mm, vma);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
-       printk("mt_mod %px, (%px), STORE, %lu, %lu,", mm, vma, vma->vm_start,
-              vma->vm_end - 1);
-#endif
+       trace_vma_mt_store(mm, vma);
        mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma,
                GFP_KERNEL);
-       mt_validate(&mm->mm_mt);
 }
+/* External interface to store a VMA into an mm.  Used by fork.
+ *
+ * @mm: The mm_struct
+ * @vma: The vm_area_struct to store in the maple tree.
+ */
 void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       __vma_mt_store(mm, vma);
+       vma_mt_store(mm, vma);
 }
-// LRH: Needed - update linked list, should fine.
+
 static void
 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *prev, struct rb_node **rb_link,
        struct rb_node *rb_parent)
 {
-       __vma_mt_store(mm, vma);
+       vma_mt_store(mm, vma);
        __vma_link_list(mm, vma, prev);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
 }
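
Taken together, the three helpers above map the VMA lifecycle onto plain
maple tree operations: store on insert, szero plus re-store on resize, erase
on removal.  A hypothetical caller, for illustration only (these helpers are
static to mm/mmap.c):

	static void demo_vma_lifecycle(struct mm_struct *mm,
				       struct vm_area_struct *vma)
	{
		/* Insert: map [vm_start, vm_end - 1] => vma. */
		vma_mt_store(mm, vma);

		/* Shrink from the front, as __vma_adjust() does: clear the
		 * vacated prefix, update the vma, then re-store it. */
		vma_mt_szero(mm, vma->vm_start, vma->vm_start + PAGE_SIZE);
		vma->vm_start += PAGE_SIZE;
		vma_mt_store(mm, vma);

		/* Remove: erase the entry containing vm_start. */
		vma_mt_erase(mm, vma);
	}
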
@@ -937,7 +919,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
        int remove_next = 0;
 
        validate_mm(mm);
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
 
        if (next && !insert) {
                struct vm_area_struct *exporter = NULL, *importer = NULL;
@@ -1067,20 +1049,19 @@ again:
                unsigned long old_start = vma->vm_start;
                vma->vm_start = start;
                if (old_start < start)
-                       __vma_mt_szero(mm, old_start, start);
+                       vma_mt_szero(mm, old_start, start);
                start_changed = true;
        }
        if (end != vma->vm_end) {
                unsigned long old_end = vma->vm_end;
                vma->vm_end = end;
                if (old_end > end)
-                       __vma_mt_szero(mm, end - 1, old_end);
+                       vma_mt_szero(mm, end - 1, old_end);
                end_changed = true;
        }
 
-       if (end_changed || start_changed) {
-               __vma_mt_store(mm, vma);
-       }
+       if (end_changed || start_changed)
+               vma_mt_store(mm, vma);
 
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
@@ -1090,7 +1071,7 @@ again:
                next->vm_pgoff += adjust_next >> PAGE_SHIFT;
                // the vma_store is necessary as the adjust_next may be
                // negative and expand backwards.
-               __vma_mt_store(mm, next);
+               vma_mt_store(mm, next);
        }
 
        if (file) {
@@ -1104,6 +1085,8 @@ again:
                /*
                 * vma_merge has merged next into vma, and needs
                 * us to remove next before dropping the locks.
+                * Since we have expanded over this vma, the maple tree
+                * will have been overwritten by storing the new value.
                 */
                /* Since we have expanded over this vma, the maple tree will
                 * have overwritten by storing the value */
@@ -1227,7 +1210,7 @@ again:
                uprobe_mmap(insert);
 
        validate_mm(mm);
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
 
        return 0;
 }
@@ -1387,7 +1370,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        struct vm_area_struct *area, *next;
        int err;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
@@ -1463,7 +1446,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                khugepaged_enter_vma_merge(area, vm_flags);
                return area;
        }
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
 
        return NULL;
 }
@@ -1957,7 +1940,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        /* Check against address space limit. */
        if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
                unsigned long nr_pages;
@@ -2122,7 +2105,7 @@ out:
 
        vma_set_page_prot(vma);
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return addr;
 
 unmap_and_free_vma:
@@ -2142,7 +2125,7 @@ free_vma:
 unacct_error:
        if (charged)
                vm_unacct_memory(charged);
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return error;
 }
 
@@ -2160,19 +2143,21 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
        struct vm_area_struct *vma;
        unsigned long length, low_limit, high_limit, gap_start, gap_end;
        unsigned long gap;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
        /* Adjust search length to account for worst case alignment overhead */
        length = info->length + info->align_mask;
        if (length < info->length)
                return -ENOMEM;
 
 
-       // Maple tree is self contained.
+       /* Maple tree is self contained. */
        rcu_read_lock();
-       if (mas_get_unmapped_area(&mas, info->low_limit,
-                               info->high_limit - 1, length))
+       if (mas_get_unmapped_area(&mas, info->low_limit, info->high_limit - 1,
+                                 length)) {
+               rcu_read_unlock();
                return -ENOMEM;
+       }
        rcu_read_unlock();
        gap = mas.index;
        gap += (info->align_offset - gap) & info->align_mask;
@@ -2269,9 +2254,9 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
        struct vm_area_struct *vma = NULL;
        unsigned long length, low_limit, high_limit, gap_start, gap_end;
        unsigned long gap;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
 
-       validate_mm_mt(mm, NULL);
+       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       validate_mm_mt(mm);
        /* Adjust search length to account for worst case alignment overhead */
        length = info->length + info->align_mask;
        if (length < info->length)
@@ -2279,9 +2264,10 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
        rcu_read_lock();
        if (mas_get_unmapped_area_rev(&mas, info->low_limit, info->high_limit,
-                               length))
+                               length)) {
+               rcu_read_unlock();
                return -ENOMEM;
-
+       }
        rcu_read_unlock();
        gap = (mas.index + info->align_mask) & ~info->align_mask;
        gap -= info->align_offset & info->align_mask;
@@ -2372,7 +2358,6 @@ found_highest:
        VM_BUG_ON(gap_end < gap_start);
 
        if (gap != gap_end) {
-               struct vm_area_struct *rb_find_vma(struct mm_struct *mm, unsigned long addr);
                pr_err("%s: %px Gap was found: mt %lu gap_end %lu\n", __func__,
                                mm, gap, gap_end);
                pr_err("window was %lu - %lu size %lu\n", info->high_limit,
@@ -2382,10 +2367,13 @@ found_highest:
                pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index,
                                info->align_mask, info->align_offset);
                pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index,
-                               rb_find_vma(mm, mas.index), vma);
+                               find_vma(mm, mas.index), vma);
+#if defined(CONFIG_DEBUG_MAPLE_TREE)
                mt_dump(&mm->mm_mt);
+#endif
                {
                        struct vm_area_struct *dv = mm->mmap;
+
                        while (dv) {
                                printk("vma %px %lu-%lu\n", dv, dv->vm_start, dv->vm_end);
                                dv = dv->vm_next;
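
Both gap searches above follow the same shape: set up a ma_state over the
whole tree, take the RCU read lock, ask the tree for a free span of the
required length, and only then apply the caller's alignment.  Condensed into
a hypothetical helper (mas_get_unmapped_area() is the call used by this
patch):

	static unsigned long demo_find_gap(struct mm_struct *mm,
					   unsigned long length,
					   unsigned long low, unsigned long high)
	{
		MA_STATE(mas, &mm->mm_mt, 0, 0);

		rcu_read_lock();
		if (mas_get_unmapped_area(&mas, low, high - 1, length)) {
			rcu_read_unlock();
			return -ENOMEM;
		}
		rcu_read_unlock();

		return mas.index;	/* first index of the free span */
	}
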
@@ -2631,7 +2619,6 @@ struct vm_area_struct *rb_find_vma(struct mm_struct *mm, unsigned long addr)
                vmacache_update(addr, vma);
        return vma;
 }
-
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *ret = rb_find_vma(mm, addr);
@@ -2761,7 +2748,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        unsigned long gap_addr;
        int error = 0;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
@@ -2838,7 +2825,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(mm);
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2886,8 +2873,32 @@ int expand_downwards(struct vm_area_struct *vma,
                grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
                error = -ENOMEM;
-               if (grow > vma->vm_pgoff)
-                       goto no_update;
+               if (grow <= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               /*
+                                * vma_gap_update() doesn't support concurrent
+                                * updates, but we only hold a shared mmap_lock
+                                * lock here, so we need to protect against
+                                * concurrent vma expansions.
+                                * anon_vma_lock_write() doesn't help here, as
+                                * we don't guarantee that all growable vmas
+                                * in a mm share the same root anon vma.
+                                * So, we reuse mm->page_table_lock to guard
+                                * against concurrent vma expansions.
+                                */
+                               spin_lock(&mm->page_table_lock);
+                               if (vma->vm_flags & VM_LOCKED)
+                                       mm->locked_vm += grow;
+                               vm_stat_account(mm, vma->vm_flags, grow);
+                               anon_vma_interval_tree_pre_update_vma(vma);
+                               vma->vm_start = address;
+                               vma->vm_pgoff -= grow;
+                               /* Overwrite old entry in mtree. */
+                               vma_mt_store(mm, vma);
+                               anon_vma_interval_tree_post_update_vma(vma);
+                               vma_gap_update(vma);
+                               spin_unlock(&mm->page_table_lock);
+                       }
+               }
 
-               error = acct_stack_growth(vma, size, grow);
-               if (error)
@@ -3054,7 +3065,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        vma->vm_prev = NULL;
        do {
                vma_rb_erase(vma, &mm->mm_rb);
-               __vma_mt_erase(mm, vma);
+               vma_mt_erase(mm, vma);
                mm->map_count--;
                tail_vma = vma;
                vma = vma->vm_next;
@@ -3091,7 +3102,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct vm_area_struct *new;
        int err;
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
 
        if (vma->vm_ops && vma->vm_ops->split) {
                err = vma->vm_ops->split(vma, addr);
@@ -3144,7 +3155,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        mpol_put(vma_policy(new));
  out_free_vma:
        vm_area_free(new);
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return err;
 }
 
@@ -3433,7 +3444,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
        unsigned long mapped_addr;
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
 
        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
@@ -3491,7 +3502,7 @@ out:
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
        vma->vm_flags |= VM_SOFTDIRTY;
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return 0;
 }
 
@@ -3597,12 +3608,8 @@ void exit_mmap(struct mm_struct *mm)
                cond_resched();
        }
 
-       mtree_destroy(&mm->mm_mt);
        trace_exit_mmap(mm);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
-       printk("mt_mod %px, (%px), DESTROY", mm, &mm->mm_mt);
-#endif
-
+       mtree_destroy(&mm->mm_mt);
        vm_unacct_memory(nr_accounted);
 }
 
@@ -3621,11 +3628,15 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
                           &prev, &rb_link, &rb_parent))
                return -ENOMEM;
 
-       if ((overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1)) != NULL) {
+       overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1);
+       if (overlap) {
                pr_err("Found vma ending at %lu\n", start - 1);
                pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap,
                                overlap->vm_start, overlap->vm_end - 1);
+#if defined(CONFIG_DEBUG_MAPLE_TREE)
                mt_dump(&mm->mm_mt);
+#endif
                BUG();
        }
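
The overlap check above doubles as a handy idiom: mt_find() scans from
*index up to a limit and returns the first entry it meets, so a NULL result
proves a range is unoccupied.  As a standalone test (hypothetical helper
name):

	static bool demo_range_is_free(struct maple_tree *mt,
				       unsigned long start, unsigned long last)
	{
		unsigned long index = start;

		return mt_find(mt, &index, last) == NULL;
	}
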
 
@@ -3670,7 +3681,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        bool faulted_in_anon_vma = true;
        unsigned long index = addr;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        /*
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
@@ -3727,7 +3738,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                vma_link(mm, new_vma, prev, rb_link, rb_parent);
                *need_rmap_locks = false;
        }
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return new_vma;
 
 out_free_mempol:
@@ -3735,7 +3746,7 @@ out_free_mempol:
 out_free_vma:
        vm_area_free(new_vma);
 out:
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return NULL;
 }
 
@@ -3860,7 +3871,7 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
@@ -3882,12 +3893,12 @@ static struct vm_area_struct *__install_special_mapping(
 
        perf_event_mmap(vma);
 
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return vma;
 
 out:
        vm_area_free(vma);
-       validate_mm_mt(mm, NULL);
+       validate_mm_mt(mm);
        return ERR_PTR(ret);
 }
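
Finally, the validation helpers walk the rb_tree and the maple tree in
lockstep.  Walking just the maple tree side follows the mas_for_each()
pattern used by validate_mm_mt() above (a sketch with a hypothetical name;
xa_is_zero() skips retained zero entries exactly as the patch does):

	static void demo_dump_tree(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		MA_STATE(mas, &mm->mm_mt, 0, 0);

		rcu_read_lock();
		mas_for_each(&mas, vma, ULONG_MAX) {
			if (xa_is_zero(vma))
				continue;
			pr_info("vma %px: %lu-%lu (pivots %lu-%lu)\n", vma,
				vma->vm_start, vma->vm_end, mas.index, mas.last);
		}
		rcu_read_unlock();
	}
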