www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: Update validate_mm() to use vma iterator
author: Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 1 Dec 2022 16:53:06 +0000 (11:53 -0500)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Tue, 13 Dec 2022 21:22:37 +0000 (16:22 -0500)
Use the vma iterator in the validation code and combine the code to
check the maple tree into the main validate_mm() function.

Introduce a new function vma_iter_dump_tree() to dump the maple tree in
hex layout.

Replace all calls to validate_mm_mt() with validate_mm().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
include/linux/mmdebug.h
mm/debug.c
mm/mmap.c

index b8728d11c94909ac1bed23b3abc87def64229479..7c3e7b0b0e8fd6e91140d50432cf98eecb253193 100644 (file)
@@ -8,10 +8,12 @@
 struct page;
 struct vm_area_struct;
 struct mm_struct;
+struct vma_iterator;
 
 void dump_page(struct page *page, const char *reason);
 void dump_vma(const struct vm_area_struct *vma);
 void dump_mm(const struct mm_struct *mm);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
 
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
@@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm);
        }                                                               \
        unlikely(__ret_warn_once);                                      \
 })
+#define VM_WARN_ON_ONCE_MM(cond, mm)           ({                      \
+       static bool __section(".data.once") __warned;                   \
+       int __ret_warn_once = !!(cond);                                 \
+                                                                       \
+       if (unlikely(__ret_warn_once && !__warned)) {                   \
+               dump_mm(mm);                                            \
+               __warned = true;                                        \
+               WARN_ON(1);                                             \
+       }                                                               \
+       unlikely(__ret_warn_once);                                      \
+})
 
 #define VM_WARN_ON(cond) (void)WARN_ON(cond)
 #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm);
 #define VM_WARN_ON_ONCE_PAGE(cond, page)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #endif
index 0fd15ba70d16317d76163ba7340e2d54cfd064d9..d5bd9a6f87a6097b4964a5cd1f27c18d54bd4860 100644 (file)
@@ -259,4 +259,12 @@ void page_init_poison(struct page *page, size_t size)
        if (page_init_poisoning)
                memset(page, PAGE_POISON_PATTERN, size);
 }
+
+void vma_iter_dump_tree(const struct vma_iterator *vmi)
+{
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+       mt_dump(vmi->mas.tree, mt_dump_hex);
+#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+}
+
 #endif         /* CONFIG_DEBUG_VM */
index 7e65cde5c08ed8c99d8943629a72cd7d3bee1bd1..d86a2615bdf080f7656d36cfacc5a081fb457932 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -171,13 +171,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
                printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
                printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
                printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
-               mt_dump(vmi->mas.tree, mt_dump_hex);
+               vma_iter_dump_tree(vmi);
        }
        if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
                printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
                printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
                printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
-               mt_dump(vmi->mas.tree, mt_dump_hex);
+               vma_iter_dump_tree(vmi);
        }
 #endif
 
@@ -357,62 +357,45 @@ out:
        return origbrk;
 }
 
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
-extern void mt_validate(struct maple_tree *mt);
-extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt);
-
-/* Validate the maple tree */
-static void validate_mm_mt(struct mm_struct *mm)
-{
-       struct maple_tree *mt = &mm->mm_mt;
-       struct vm_area_struct *vma_mt;
-
-       MA_STATE(mas, mt, 0, 0);
-
-       mt_validate(&mm->mm_mt);
-       mas_for_each(&mas, vma_mt, ULONG_MAX) {
-               if ((vma_mt->vm_start != mas.index) ||
-                   (vma_mt->vm_end - 1 != mas.last)) {
-                       pr_emerg("issue in %s\n", current->comm);
-                       dump_stack();
-                       dump_vma(vma_mt);
-                       pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
-                                mas.index, mas.last);
-                       pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
-                                vma_mt->vm_start, vma_mt->vm_end);
-
-                       mt_dump(mas.tree, mt_dump_hex);
-                       if (vma_mt->vm_end != mas.last + 1) {
-                               pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
-                                               mm, vma_mt->vm_start, vma_mt->vm_end,
-                                               mas.index, mas.last);
-                               mt_dump(mas.tree, mt_dump_hex);
-                       }
-                       VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
-                       if (vma_mt->vm_start != mas.index) {
-                               pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
-                                               mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
-                               mt_dump(mas.tree, mt_dump_hex);
-                       }
-                       VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
-               }
-       }
-}
+#if defined(CONFIG_DEBUG_VM)
 
 static void validate_mm(struct mm_struct *mm)
 {
        int bug = 0;
        int i = 0;
        struct vm_area_struct *vma;
-       MA_STATE(mas, &mm->mm_mt, 0, 0);
+       VMA_ITERATOR(vmi, mm, 0);
 
-       validate_mm_mt(mm);
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+       mt_validate(&mm->mm_mt);
+#endif
 
-       mas_for_each(&mas, vma, ULONG_MAX) {
+       for_each_vma(vmi, vma) {
 #ifdef CONFIG_DEBUG_VM_RB
                struct anon_vma *anon_vma = vma->anon_vma;
                struct anon_vma_chain *avc;
+#endif
+               unsigned long vmi_start, vmi_end;
+               bool warn = 0;
+
+               vmi_start = vma_iter_addr(&vmi);
+               vmi_end = vma_iter_end(&vmi);
+               if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
+                       warn = 1;
+
+               if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
+                       warn = 1;
 
+               if (warn) {
+                       pr_emerg("issue in %s\n", current->comm);
+                       dump_stack();
+                       dump_vma(vma);
+                       pr_emerg("tree range: %px start %lx end %lx\n", vma,
+                                vmi_start, vmi_end - 1);
+                       vma_iter_dump_tree(&vmi);
+               }
+
+#ifdef CONFIG_DEBUG_VM_RB
                if (anon_vma) {
                        anon_vma_lock_read(anon_vma);
                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
@@ -423,16 +406,15 @@ static void validate_mm(struct mm_struct *mm)
                i++;
        }
        if (i != mm->map_count) {
-               pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
+               pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
                bug = 1;
        }
        VM_BUG_ON_MM(bug, mm);
 }
 
-#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
-#define validate_mm_mt(root) do { } while (0)
+#else /* !CONFIG_DEBUG_VM */
 #define validate_mm(mm) do { } while (0)
-#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+#endif /* CONFIG_DEBUG_VM */
 
 /*
  * vma has some anon_vma assigned, and is already inserted on that
@@ -2216,7 +2198,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
        struct vm_area_struct *new;
        int err;
 
-       validate_mm_mt(vma->vm_mm);
+       validate_mm(vma->vm_mm);
 
        if (vma->vm_ops && vma->vm_ops->may_split) {
                err = vma->vm_ops->may_split(vma, addr);
@@ -2279,7 +2261,7 @@ out_free_vmi:
        vma_iter_free(vmi);
 out_free_vma:
        vm_area_free(new);
-       validate_mm_mt(vma->vm_mm);
+       validate_mm(vma->vm_mm);
        return err;
 }
 
@@ -2901,7 +2883,7 @@ static int do_brk_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
        arch_unmap(mm, newbrk, oldbrk);
        ret = do_vmi_align_munmap(vmi, vma, mm, newbrk, oldbrk, uf, true);
-       validate_mm_mt(mm);
+       validate_mm(mm);
        return ret;
 }
 
@@ -2923,7 +2905,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
        struct mm_struct *mm = current->mm;
        struct vma_prepare vp;
 
-       validate_mm_mt(mm);
+       validate_mm(mm);
        /*
         * Check against address space limits by the changed size
         * Note: This happens *after* clearing old mappings in some code paths.
@@ -3163,7 +3145,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        bool faulted_in_anon_vma = true;
        VMA_ITERATOR(vmi, mm, addr);
 
-       validate_mm_mt(mm);
+       validate_mm(mm);
        /*
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
@@ -3221,7 +3203,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        goto out_vma_link;
                *need_rmap_locks = false;
        }
-       validate_mm_mt(mm);
+       validate_mm(mm);
        return new_vma;
 
 out_vma_link:
@@ -3237,7 +3219,7 @@ out_free_mempol:
 out_free_vma:
        vm_area_free(new_vma);
 out:
-       validate_mm_mt(mm);
+       validate_mm(mm);
        return NULL;
 }
 
@@ -3374,7 +3356,7 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       validate_mm_mt(mm);
+       validate_mm(mm);
        vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
@@ -3397,12 +3379,12 @@ static struct vm_area_struct *__install_special_mapping(
 
        perf_event_mmap(vma);
 
-       validate_mm_mt(mm);
+       validate_mm(mm);
        return vma;
 
 out:
        vm_area_free(vma);
-       validate_mm_mt(mm);
+       validate_mm(mm);
        return ERR_PTR(ret);
 }