From c40b679b6499fc22a12ba4c95eb37192eb5869a5 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 20 Feb 2020 16:39:36 -0500 Subject: [PATCH] mm/mmap and mm/mprotect: Alter mmap erase method. Can't call erase every time the rbtree erases as the maple tree doesn't allow overlaps, so it's not necessary. mprotect was too noisy. mmap also had debug altered and verify changed in this commit. Signed-off-by: Liam R. Howlett --- mm/mmap.c | 99 +++++++++++++++++++++++++++++++++------------------ mm/mprotect.c | 2 +- 2 files changed, 66 insertions(+), 35 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 4fcd86e70403..3c5a9eb9a7ad 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -57,6 +57,9 @@ #include #include "internal.h" +#define CONFIG_DEBUG_MAPLE_TREE +#define CONFIG_DEBUG_VM_RB +extern void mt_validate(struct maple_tree *mt); #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) @@ -411,21 +414,35 @@ static void validate_mm_mt(struct mm_struct *mm, if (mas_retry(&mas, vma_mt)) continue; + if (vma && vma == ignore) + vma = vma->vm_next; + if (!vma) break; - if (vma != vma_mt) { + + if ((vma != vma_mt) || + (vma->vm_start != vma_mt->vm_start) || + (vma->vm_end != vma_mt->vm_end)) { pr_emerg("mt: %px %lu - %lu\n", vma_mt, vma_mt->vm_start, vma_mt->vm_end); pr_emerg("rb: %px %lu - %lu\n", vma, vma->vm_start, vma->vm_end); + if (ignore) + pr_emerg("rb_skip %px %lu - %lu\n", ignore, + ignore->vm_start, ignore->vm_end); + pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next, + vma->vm_next->vm_start, vma->vm_next->vm_end); + + mt_dump(mas.tree); } VM_BUG_ON(vma != vma_mt); - if (vma) - vma = vma->vm_next; + vma = vma->vm_next; + } VM_BUG_ON(vma); rcu_read_unlock(); + mt_validate(&mm->mm_mt); } #endif static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore) @@ -458,15 +475,15 @@ static void validate_mm(struct mm_struct *mm) struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; - pr_cont("vma: %lu-%lu", vma->vm_start, 
vma->vm_end); +// pr_cont("vma: %lu-%lu", vma->vm_start, vma->vm_end); if (anon_vma) { - pr_cont(" anon"); +// pr_cont(" anon"); anon_vma_lock_read(anon_vma); - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_verify(avc); +// list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) +// anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } - pr_cont("\n"); +// pr_cont("\n"); highest_address = vm_end_gap(vma); vma = vma->vm_next; @@ -477,13 +494,20 @@ static void validate_mm(struct mm_struct *mm) if (mas_retry(&mas, entry)) continue; + VM_BUG_ON_MM(!entry, mm); + if (entry->vm_end != mas.last + 1) { - printk("vma: entry %lu-%lu\tmt %lu-%lu\n", - entry->vm_start, entry->vm_end, - mas.index, mas.last); + printk("vma: %px entry %lu-%lu\tmt %lu-%lu\n", + mm, entry->vm_start, entry->vm_end, + mas.index, mas.last); mt_dump(mas.tree); } VM_BUG_ON_MM(entry->vm_end != mas.last + 1, mm); + if (entry->vm_start != mas.index) { + printk("vma: %px entry %px %lu - %lu doesn't match\n", + mm, entry, entry->vm_start, entry->vm_end); + mt_dump(mas.tree); + } VM_BUG_ON_MM(entry->vm_start != mas.index, mm); mt_highest_address = vm_end_gap(entry); mt_i++; @@ -804,16 +828,25 @@ static void __vma_link_file(struct vm_area_struct *vma) } static void __vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma) { - //printk("%s: mt_mod %p: ERASE, %lu, %lu,\n", __func__, mm, vma->vm_start, - // vma->vm_end - 1); +// printk("%s: mt_mod %px (%px): ERASE, %lu, %lu,\n", __func__, mm, vma, +// vma->vm_start, vma->vm_end - 1); mtree_erase(&mm->mm_mt, vma->vm_start); + mt_validate(&mm->mm_mt); +} +static void __vma_mt_szero(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ +// printk("%s: mt_mod %px (%px): SNULL, %lu, %lu,\n", __func__, mm, NULL, +// start, end - 1); + mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL); } static void __vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma) { - //printk("%s: 
mt_mod %p: STORE, %lu, %lu,\n", __func__, mm, vma->vm_start, - // vma->vm_end - 1); +// printk("%s: mt_mod %px (%px): STORE, %lu, %lu,\n", __func__, mm, vma, +// vma->vm_start, vma->vm_end - 1); mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma, GFP_KERNEL); + mt_validate(&mm->mm_mt); } void vma_store(struct mm_struct *mm, struct vm_area_struct *vma) { @@ -828,7 +861,7 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, __vma_mt_store(mm, vma); __vma_link_list(mm, vma, prev); __vma_link_rb(mm, vma, rb_link, rb_parent); - validate_mm_mt(mm, NULL); + //validate_mm_mt(mm, NULL); } static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, @@ -869,12 +902,7 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) mm->map_count++; } -<<<<<<< HEAD -static __always_inline void __vma_unlink(struct mm_struct *mm, -======= -// LRH: Fixed. static __always_inline void __vma_unlink_common(struct mm_struct *mm, ->>>>>>> 6942ca05fee78... mm: Add maple tree to init-mm,mmap, mprotect, mm_types struct vm_area_struct *vma, struct vm_area_struct *ignore) { @@ -907,6 +935,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, int remove_next = 0; validate_mm(mm); + validate_mm_mt(mm, NULL); + //printk("%s %px %lu %lu\n", __func__, vma, start, end); if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; @@ -934,7 +964,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } else { VM_WARN_ON(expand != vma); /* - * case 1, 6, 7, remove_next == 2 is case 6, + * case 1, 6, 7: remove_next == 2 is case 6, * remove_next == 1 is case 1 or 7. 
*/ remove_next = 1 + (end > next->vm_end); @@ -1032,15 +1062,17 @@ again: } if (start != vma->vm_start) { + unsigned long old_start = vma->vm_start; vma->vm_start = start; - if (vma->vm_start < start) - __vma_mt_erase(mm, vma); + if (old_start < start) + __vma_mt_szero(mm, old_start, start); start_changed = true; } if (end != vma->vm_end) { - if (vma->vm_end > end) - __vma_mt_erase(mm, vma); + unsigned long old_end = vma->vm_end; vma->vm_end = end; + if (old_end > end) + __vma_mt_szero(mm, end, old_end); end_changed = true; } @@ -1050,14 +1082,9 @@ again: vma->vm_pgoff = pgoff; if (adjust_next) { -<<<<<<< HEAD next->vm_start += adjust_next; next->vm_pgoff += adjust_next >> PAGE_SHIFT; -======= - next->vm_start += adjust_next << PAGE_SHIFT; - next->vm_pgoff += adjust_next; __vma_mt_store(mm, next); ->>>>>>> 6942ca05fee78... mm: Add maple tree to init-mm,mmap, mprotect, mm_types } if (file) { @@ -1072,6 +1099,8 @@ again: * vma_merge has merged next into vma, and needs * us to remove next before dropping the locks. */ + /* Since we have expanded over this vma, the maple tree entry will + * have been overwritten by the store */ if (remove_next != 3) __vma_unlink(mm, next, next); else @@ -1093,6 +1122,9 @@ again: * us to insert it before dropping the locks * (it may either follow vma or precede it). 
*/ + /* maple tree store is done in the __vma_link call in this + * call graph */ +// printk("insert %px %lu - %lu\n", insert, insert->vm_start, insert->vm_end); __insert_vm_struct(mm, insert); } else { if (start_changed) @@ -1190,6 +1222,7 @@ again: uprobe_mmap(insert); validate_mm(mm); + validate_mm_mt(mm, NULL); return 0; } @@ -2225,8 +2258,6 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) unsigned long gap; MA_STATE(mas, &mm->mm_mt, 0, 0); - validate_mm_mt(mm, NULL); - /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) @@ -2964,7 +2995,7 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) vma = remove_vma(vma); } while (vma); vm_unacct_memory(nr_accounted); - validate_mm(mm); + //validate_mm(mm); } /* diff --git a/mm/mprotect.c b/mm/mprotect.c index 1505664dbbd4..ac843a611f4b 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -407,7 +407,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, int error; int dirty_accountable = 0; - printk("vma area %lu-%lu\n", vma->vm_start, vma->vm_end); + //printk("vma area %lu-%lu\n", vma->vm_start, vma->vm_end); if (newflags == oldflags) { *pprev = vma; return 0; -- 2.50.1