(vma->vm_end != vma_mt->vm_end) ||
(vma->vm_start != mas.index) ||
(vma->vm_end - 1 != mas.last)) {
+ pr_emerg("issue in %s\n", current->comm);
+ dump_stack();
+ dump_vma(vma_mt);
+ pr_emerg("and next in rb\n");
+ dump_vma(vma->vm_next);
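+ /* Print the maple tree's view of the range for comparison. */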
pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
mas.index, mas.last);
pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
flush_dcache_mmap_unlock(mapping);
}
}
-
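/*
 * Erase the maple tree entry covering @vma->vm_start and sanity-check
 * the tree afterwards.
 */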
static void __vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
{
-// printk("%s: mt_mod %px (%px): ERASE, %lu, %lu,\n", __func__, mm, vma,
-// vma->vm_start, vma->vm_end - 1);
+// printk("mt_mod %px, (%px), ERASE, %lu, %lu", mm, vma, vma->vm_start,
+// vma->vm_end);
mtree_erase(&mm->mm_mt, vma->vm_start);
mt_validate(&mm->mm_mt);
}
-
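/*
 * Store NULL over the range [@start, @end - 1], clearing any VMAs the
 * maple tree had recorded there.
 */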
static void __vma_mt_szero(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
-// printk("%s: mt_mod %px (%px): SNULL, %lu, %lu,\n", __func__, mm, NULL,
-// start, end - 1);
+// printk("mt_mod %px, (NULL), SNULL, %lu, %lu", mm, start,
+// end);
mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL);
}
-
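/*
 * Record @vma in the maple tree over [@vma->vm_start, @vma->vm_end - 1]
 * (the tree stores inclusive ranges) and sanity-check the tree.
 */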
static void __vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
-// printk("%s: mt_mod %px (%px): STORE, %lu, %lu,\n", __func__, mm, vma,
-// vma->vm_start, vma->vm_end - 1);
+// printk("mt_mod %px, (%px), STORE, %lu, %lu", mm, vma, vma->vm_start,
+// vma->vm_end);
mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma,
GFP_KERNEL);
mt_validate(&mm->mm_mt);
}
-
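/*
 * Non-static wrapper around __vma_mt_store() for callers outside this
 * file.
 */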
void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
__vma_mt_store(mm, vma);
}
-
// LRH: Needed - update linked list, should be fine.
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
__vma_mt_store(mm, vma);
__vma_link_list(mm, vma, prev);
__vma_link_rb(mm, vma, rb_link, rb_parent);
- //validate_mm_mt(mm, NULL);
}
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
validate_mm(mm);
validate_mm_mt(mm, NULL);
- //printk("%s %px %lu %lu\n", __func__, vma, start, end);
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
*/
/* maple tree store is done in the __vma_link call in this
* call graph */
-// printk("insert %px %lu - %lu\n", insert, insert->vm_start, insert->vm_end);
__insert_vm_struct(mm, insert);
} else {
if (start_changed)
struct vm_area_struct *area, *next;
int err;
+ validate_mm_mt(mm, NULL);
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
khugepaged_enter_vma_merge(area, vm_flags);
return area;
}
+ validate_mm_mt(mm, NULL);
return NULL;
}
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
+ validate_mm_mt(mm, NULL);
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
vma_set_page_prot(vma);
+ validate_mm_mt(mm, NULL);
return addr;
unmap_and_free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
+ validate_mm_mt(mm, NULL);
return error;
}
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = NULL;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
unsigned long gap;
MA_STATE(mas, &mm->mm_mt, 0, 0);
+ validate_mm_mt(mm, NULL);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
if (mas_get_unmapped_area_rev(&mas, info->low_limit, info->high_limit,
length))
return -ENOMEM;
+
rcu_read_unlock();
- gap = mas.index;
- // Not sure why this is needed..
- if (mas.max > info->high_limit)
- gap = ((gap) & ~info->align_mask) + info->align_offset;
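+ /*
+  * Align the maple tree result so the VM_BUG_ON() below can compare
+  * it against the rbtree-derived gap_end.
+  */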
+ gap = (mas.index + info->align_mask) & ~info->align_mask;
+ gap -= info->align_offset & info->align_mask;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
info->low_limit, length);
pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max,
mas.last);
- pr_err("mas.index %lu align %lu offset %lu\n", mas.index,
- info->align_offset, info->align_mask);
+ pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index,
+ info->align_mask, info->align_offset);
pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index,
rb_find_vma(mm, mas.index), vma);
mt_dump(&mm->mm_mt);
dv = dv->vm_next;
}
}
+ VM_BUG_ON(gap != gap_end);
}
- VM_BUG_ON(gap != gap_end);
return gap_end;
}
info.low_limit = mm->mmap_base;
info.high_limit = mmap_end;
info.align_mask = 0;
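+ /* align_offset is now read by the gap calculation, so initialize it. */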
+ info.align_offset = 0;
return vm_unmapped_area(&info);
}
#endif
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
info.align_mask = 0;
+ info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
unsigned long gap_addr;
int error = 0;
+ validate_mm_mt(mm, NULL);
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
+ validate_mm_mt(mm, NULL);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
{
struct vm_area_struct *new;
int err;
+ validate_mm_mt(mm, NULL);
if (vma->vm_ops && vma->vm_ops->split) {
err = vma->vm_ops->split(vma, addr);
mpol_put(vma_policy(new));
out_free_vma:
vm_area_free(new);
+ validate_mm_mt(mm, NULL);
return err;
}
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
unsigned long mapped_addr;
+ validate_mm_mt(mm, NULL);
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
+ validate_mm_mt(mm, NULL);
return 0;
}
nr_accounted += vma_pages(vma);
vma = remove_vma(vma);
}
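+ /* All VMAs are gone; free the maple tree's nodes too. */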
+ mtree_destroy(&mm->mm_mt);
vm_unacct_memory(nr_accounted);
}
bool faulted_in_anon_vma = true;
unsigned long index = addr;
+ validate_mm_mt(mm, NULL);
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
}
+ validate_mm_mt(mm, NULL);
return new_vma;
out_free_mempol:
out_free_vma:
vm_area_free(new_vma);
out:
+ validate_mm_mt(mm, NULL);
return NULL;
}
int ret;
struct vm_area_struct *vma;
+ validate_mm_mt(mm, NULL);
vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
perf_event_mmap(vma);
+ validate_mm_mt(mm, NULL);
return vma;
out:
vm_area_free(vma);
+ validate_mm_mt(mm, NULL);
return ERR_PTR(ret);
}