From 02bb133b171b19db95d5210983e966e87089ef5b Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Wed, 24 Jun 2020 11:01:44 -0400
Subject: [PATCH] mm/mmap: Update maple_tree prints and validations.

Signed-off-by: Liam R. Howlett
---
 mm/mmap.c | 61 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 39 insertions(+), 22 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index 888da5f45781..15993f1e9a3b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -430,6 +430,11 @@ static void validate_mm_mt(struct mm_struct *mm,
 		    (vma->vm_end != vma_mt->vm_end) ||
 		    (vma->vm_start != mas.index) ||
 		    (vma->vm_end -1 != mas.last)){
+			pr_emerg("issue in %s\n", current->comm);
+			dump_stack();
+			dump_vma(vma_mt);
+			pr_emerg("and next in rb\n");
+			dump_vma(vma->vm_next);
 			pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
 				 mas.index, mas.last);
 			pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
@@ -770,37 +775,32 @@ static void __vma_link_file(struct vm_area_struct *vma)
 		flush_dcache_mmap_unlock(mapping);
 	}
 }
-
 static void __vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-//	printk("%s: mt_mod %px (%px): ERASE, %lu, %lu,\n", __func__, mm, vma,
-//			vma->vm_start, vma->vm_end - 1);
+//	printk("mt_mod %px, (%px), ERASE, %lu, %lu", mm, vma, vma->vm_start,
+//			vma->vm_end);
 	mtree_erase(&mm->mm_mt, vma->vm_start);
 	mt_validate(&mm->mm_mt);
 }
-
 static void __vma_mt_szero(struct mm_struct *mm, unsigned long start,
 		unsigned long end)
 {
-//	printk("%s: mt_mod %px (%px): SNULL, %lu, %lu,\n", __func__, mm, NULL,
-//			start, end - 1);
+//	printk("mt_mod %px, (NULL), SNULL, %lu, %lu", mm, start,
+//			end);
 	mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL);
 }
-
 static void __vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-//	printk("%s: mt_mod %px (%px): STORE, %lu, %lu,\n", __func__, mm, vma,
-//			vma->vm_start, vma->vm_end - 1);
+//	printk("mt_mod %px, (%px), STORE, %lu, %lu", mm, vma, vma->vm_start,
+//			vma->vm_end);
 	mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma,
 			GFP_KERNEL);
 	mt_validate(&mm->mm_mt);
 }
-
 void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
 {
 	__vma_mt_store(mm, vma);
 }
-
 // LRH: Needed - update linked list, should fine.
 static void
 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -810,7 +810,6 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	__vma_mt_store(mm, vma);
 	__vma_link_list(mm, vma, prev);
 	__vma_link_rb(mm, vma, rb_link, rb_parent);
-	//validate_mm_mt(mm, NULL);
 }
 
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -886,7 +885,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 
 	validate_mm(mm);
 	validate_mm_mt(mm, NULL);
-	//printk("%s %px %lu %lu\n", __func__, vma, start, end);
 
 	if (next && !insert) {
 		struct vm_area_struct *exporter = NULL, *importer = NULL;
@@ -1078,7 +1076,6 @@ again:
 		 */
 		/* maple tree store is done in the __vma_link call in this
 		 * call graph */
-//	printk("insert %px %lu - %lu\n", insert, insert->vm_start, insert->vm_end);
 		__insert_vm_struct(mm, insert);
 	} else {
 		if (start_changed)
@@ -1337,6 +1334,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	struct vm_area_struct *area, *next;
 	int err;
 
+	validate_mm_mt(mm, NULL);
 	/*
 	 * We later require that vma->vm_flags == vm_flags,
 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
@@ -1415,6 +1413,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 		khugepaged_enter_vma_merge(area, vm_flags);
 		return area;
 	}
+	validate_mm_mt(mm, NULL);
 
 	return NULL;
 }
@@ -1914,6 +1913,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct rb_node **rb_link, *rb_parent;
 	unsigned long charged = 0;
 
+	validate_mm_mt(mm, NULL);
 	/* Check against address space limit. */
 	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
 		unsigned long nr_pages;
@@ -2051,6 +2051,7 @@ out:
 
 	vma_set_page_prot(vma);
 
+	validate_mm_mt(mm, NULL);
 	return addr;
 
 unmap_and_free_vma:
@@ -2070,6 +2071,7 @@ free_vma:
 unacct_error:
 	if (charged)
 		vm_unacct_memory(charged);
+	validate_mm_mt(mm, NULL);
 	return error;
 }
 
@@ -2193,11 +2195,12 @@ found:
 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = NULL;
 	unsigned long length, low_limit, high_limit, gap_start, gap_end;
 	unsigned long gap;
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
+	validate_mm_mt(mm, NULL);
 	/* Adjust search length to account for worst case alignment overhead */
 	length = info->length + info->align_mask;
 	if (length < info->length)
@@ -2207,11 +2210,10 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 	if (mas_get_unmapped_area_rev(&mas, info->low_limit,
				info->high_limit, length))
 		return -ENOMEM;
+	rcu_read_unlock();
 
-	gap = mas.index;
-	// Not sure why this is needed..
-	if (mas.max > info->high_limit)
-		gap = ((gap) & ~info->align_mask) + info->align_offset;
+	gap = (mas.index + info->align_mask) & ~info->align_mask;
+	gap -= info->align_offset & info->align_mask;
 	/*
 	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
@@ -2306,8 +2308,8 @@ found_highest:
 			info->low_limit, length);
 		pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min,
			mas.max, mas.last);
-		pr_err("mas.index %lu align %lu offset %lu\n", mas.index,
-			info->align_offset, info->align_mask);
+		pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index,
+			info->align_mask, info->align_offset);
 		pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index,
			rb_find_vma(mm, mas.index), vma);
 		mt_dump(&mm->mm_mt);
@@ -2318,8 +2320,8 @@ found_highest:
 				dv = dv->vm_next;
 			}
 		}
+		VM_BUG_ON(gap != gap_end);
 	}
-	VM_BUG_ON(gap != gap_end);
 
 	return gap_end;
 }
@@ -2374,6 +2376,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.low_limit = mm->mmap_base;
 	info.high_limit = mmap_end;
 	info.align_mask = 0;
+	info.align_offset = 0;
 	return vm_unmapped_area(&info);
 }
 #endif
@@ -2415,6 +2418,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
 	info.align_mask = 0;
+	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -2665,6 +2669,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	unsigned long gap_addr;
 	int error = 0;
 
+	validate_mm_mt(mm, NULL);
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
@@ -2742,6 +2747,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
+	validate_mm_mt(mm, NULL);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2986,6 +2992,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct vm_area_struct *new;
 	int err;
+	validate_mm_mt(mm, NULL);
 
 	if (vma->vm_ops && vma->vm_ops->split) {
 		err = vma->vm_ops->split(vma, addr);
@@ -3038,6 +3045,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	mpol_put(vma_policy(new));
 out_free_vma:
 	vm_area_free(new);
+	validate_mm_mt(mm, NULL);
 	return err;
 }
 
@@ -3325,6 +3333,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
 	int error;
 	unsigned long mapped_addr;
+	validate_mm_mt(mm, NULL);
 
 	/* Until we need other flags, refuse anything except VM_EXEC. */
 	if ((flags & (~VM_EXEC)) != 0)
@@ -3389,6 +3398,7 @@ out:
 	if (flags & VM_LOCKED)
 		mm->locked_vm += (len >> PAGE_SHIFT);
 	vma->vm_flags |= VM_SOFTDIRTY;
+	validate_mm_mt(mm, NULL);
 	return 0;
 }
 
@@ -3492,6 +3502,7 @@ void exit_mmap(struct mm_struct *mm)
 			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
 	}
+	mtree_destroy(&mm->mm_mt);
 	vm_unacct_memory(nr_accounted);
 }
 
@@ -3559,6 +3570,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	bool faulted_in_anon_vma = true;
 	unsigned long index = addr;
 
+	validate_mm_mt(mm, NULL);
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
@@ -3615,6 +3627,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		vma_link(mm, new_vma, prev, rb_link, rb_parent);
 		*need_rmap_locks = false;
 	}
+	validate_mm_mt(mm, NULL);
 	return new_vma;
 
 out_free_mempol:
@@ -3622,6 +3635,7 @@ out_free_mempol:
 out_free_vma:
 	vm_area_free(new_vma);
 out:
+	validate_mm_mt(mm, NULL);
 	return NULL;
 }
 
@@ -3744,6 +3758,7 @@ static struct vm_area_struct *__install_special_mapping(
 	int ret;
 	struct vm_area_struct *vma;
 
+	validate_mm_mt(mm, NULL);
 	vma = vm_area_alloc(mm);
 	if (unlikely(vma == NULL))
 		return ERR_PTR(-ENOMEM);
@@ -3765,10 +3780,12 @@ static struct vm_area_struct *__install_special_mapping(
 
 	perf_event_mmap(vma);
 
+	validate_mm_mt(mm, NULL);
 	return vma;
 
 out:
 	vm_area_free(vma);
+	validate_mm_mt(mm, NULL);
 	return ERR_PTR(ret);
 }
 
-- 
2.50.1