#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/mm_mt.h>
-#define CONFIG_DEBUG_MAPLE_TREE
-#undef CONFIG_DEBUG_MAPLE_TREE_VERBOSE
-#define CONFIG_DEBUG_VM_RB 1
extern void mt_validate(struct maple_tree *mt);
#ifndef arch_mmap_check
#if defined(CONFIG_DEBUG_MAPLE_TREE)
extern void mt_dump(const struct maple_tree *mt);
-#if 0
-static void __vma_mt_dump(struct mm_struct *mm)
-{
- struct vm_area_struct *entry = NULL;
-
- MA_STATE(mas, &mm->mm_mt, 0, 0);
-
- rcu_read_lock();
- mas_for_each(&mas, entry, ULONG_MAX) {
- if (xa_is_zero(entry))
- continue;
-
- pr_debug("vma: %lu-%lu\t%lu-%lu\n", entry->vm_start,
- entry->vm_end, mas.index, mas.last);
- }
- rcu_read_unlock();
-}
-#endif
-
-/*
- * Validate the maple tree
- */
-static void validate_mm_mt(struct mm_struct *mm,
- struct vm_area_struct *ignore)
+/* Validate the maple tree */
+static void validate_mm_mt(struct mm_struct *mm)
{
struct maple_tree *mt = &mm->mm_mt;
struct vm_area_struct *vma_mt, *vma = mm->mmap;
if (xa_is_zero(vma_mt))
continue;
- if (vma && vma == ignore)
- vma = vma->vm_next;
-
if (!vma)
break;
(vma->vm_start != vma_mt->vm_start) ||
(vma->vm_end != vma_mt->vm_end) ||
(vma->vm_start != mas.index) ||
- (vma->vm_end -1 != mas.last)){
+ (vma->vm_end - 1 != mas.last)) {
pr_emerg("issue in %s\n", current->comm);
dump_stack();
+#ifdef CONFIG_DEBUG_VM
dump_vma(vma_mt);
pr_emerg("and next in rb\n");
dump_vma(vma->vm_next);
+#endif
pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
mas.index, mas.last);
pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
vma_mt->vm_start, vma_mt->vm_end);
pr_emerg("rb vma: %px %lu - %lu\n", vma,
vma->vm_start, vma->vm_end);
- if (ignore)
- pr_emerg("rb_skip %px %lu - %lu\n", ignore,
- ignore->vm_start, ignore->vm_end);
pr_emerg("rb->next = %px %lu - %lu\n", vma->vm_next,
vma->vm_next->vm_start, vma->vm_next->vm_end);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
-#define validate_mm_mt(root, ignore) do { } while (0)
+#define validate_mm_mt(root) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif
flush_dcache_mmap_unlock(mapping);
}
}
-static void __vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
+/* Private
+ * vma_mt_erase() - Erase a VMA entry from the maple tree.
+ *
+ * @mm: The mm_struct
+ * @vma: The vm_area_struct to erase from the maple tree.
+ */
+static inline void vma_mt_erase(struct mm_struct *mm, struct vm_area_struct *vma)
{
- trace___vma_mt_erase(mm, vma);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
- printk("mt_mod %px, (%px), ERASE, %lu, %lu,", mm, vma, vma->vm_start,
- vma->vm_end - 1);
-#endif
+ trace_vma_mt_erase(mm, vma);
mtree_erase(&mm->mm_mt, vma->vm_start);
- mt_validate(&mm->mm_mt);
}
-static void __vma_mt_szero(struct mm_struct *mm, unsigned long start,
+/* Private
+ * vma_mt_szero() - Set a given range in the maple tree to NULL.  Used when
+ * modifying a vm_area_struct start or end.
+ *
+ * @mm: The mm_struct
+ * @start: The start address to zero
+ * @end: The end address to zero (exclusive)
+ */
+static inline void vma_mt_szero(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- trace___vma_mt_szero(mm, start, end);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
- printk("mt_mod %px, (%px), SNULL, %lu, %lu,", mm, NULL, start,
- end - 1);
-#endif
+ trace_vma_mt_szero(mm, start, end);
mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL);
}
-static void __vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
+/* Private
+ * vma_mt_store() - Store a given vm_area_struct in the maple tree.
+ *
+ * @mm: The mm_struct
+ * @vma: The vm_area_struct to store in the maple tree.
+ */
+static inline void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
- trace___vma_mt_store(mm, vma);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
- printk("mt_mod %px, (%px), STORE, %lu, %lu,", mm, vma, vma->vm_start,
- vma->vm_end - 1);
-#endif
+ trace_vma_mt_store(mm, vma);
mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma,
GFP_KERNEL);
- mt_validate(&mm->mm_mt);
}
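Note that the helpers above always convert the half-open VMA range into the maple tree's inclusive convention (vm_end - 1, end - 1). Purely as an illustration, and not part of this patch, a lookup by any address inside a mapping then resolves to the owning VMA:

static inline struct vm_area_struct *vma_mt_lookup(struct mm_struct *mm,
						   unsigned long addr)
{
	/* The VMA was stored over [vm_start, vm_end - 1], so mtree_load()
	 * returns it for any address inside the mapping, or NULL for a gap.
	 */
	return mtree_load(&mm->mm_mt, addr);
}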
+/* vma_store() - External interface to store a VMA in the maple tree of
+ * an mm. Used by fork().
+ *
+ * @mm: The mm_struct
+ * @vma: The vm_area_struct to store in the maple tree.
+ */
void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
- __vma_mt_store(mm, vma);
+ vma_mt_store(mm, vma);
}
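The trace_vma_mt_store()/_erase()/_szero() calls above replace the old CONFIG_DEBUG_MAPLE_TREE_VERBOSE printk()s. The event definitions live in <trace/events/mm_mt.h>, which is outside this diff; as a rough sketch only (the field layout and format string are assumptions, kept close to the removed printk format), the store event could be declared along these lines:

TRACE_EVENT(vma_mt_store,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma),

	TP_ARGS(mm, vma),

	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(struct vm_area_struct *, vma)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
	),

	TP_fast_assign(
		__entry->mm = mm;
		__entry->vma = vma;
		__entry->vm_start = vma->vm_start;
		__entry->vm_end = vma->vm_end - 1;
	),

	TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
		  __entry->mm, __entry->vma,
		  __entry->vm_start, __entry->vm_end)
);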
-// LRH: Needed - update linked list, should fine.
+
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
- __vma_mt_store(mm, vma);
+ vma_mt_store(mm, vma);
__vma_link_list(mm, vma, prev);
__vma_link_rb(mm, vma, rb_link, rb_parent);
}
int remove_next = 0;
validate_mm(mm);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
unsigned long old_start = vma->vm_start;
vma->vm_start = start;
if (old_start < start)
- __vma_mt_szero(mm, old_start, start);
+ vma_mt_szero(mm, old_start, start);
start_changed = true;
}
if (end != vma->vm_end) {
unsigned long old_end = vma->vm_end;
vma->vm_end = end;
if (old_end > end)
- __vma_mt_szero(mm, end - 1, old_end);
+ vma_mt_szero(mm, end - 1, old_end);
end_changed = true;
}
- if (end_changed || start_changed) {
- __vma_mt_store(mm, vma);
- }
+ if (end_changed || start_changed)
+ vma_mt_store(mm, vma);
vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_pgoff += adjust_next >> PAGE_SHIFT;
// the vma_store is necessary as the adjust_next may be
// negative and expand backwards.
- __vma_mt_store(mm, next);
+ vma_mt_store(mm, next);
}
if (file) {
/*
* vma_merge has merged next into vma, and needs
* us to remove next before dropping the locks.
+ * Since we have expanded over this vma, the maple tree
+ * will already have been overwritten by storing the value.
*/
- /* Since we have expanded over this vma, the maple tree will
- * have overwritten by storing the value */
uprobe_mmap(insert);
validate_mm(mm);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return 0;
}
struct vm_area_struct *area, *next;
int err;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
khugepaged_enter_vma_merge(area, vm_flags);
return area;
}
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return NULL;
}
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
vma_set_page_prot(vma);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return addr;
unmap_and_free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return error;
}
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
unsigned long gap;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
- // Maple tree is self contained.
+ /* The maple tree is self-contained. */
rcu_read_lock();
- if (mas_get_unmapped_area(&mas, info->low_limit,
- info->high_limit - 1, length))
+ if (mas_get_unmapped_area(&mas, info->low_limit, info->high_limit - 1,
+ length)) {
+ rcu_read_unlock();
return -ENOMEM;
+ }
rcu_read_unlock();
gap = mas.index;
gap += (info->align_offset - gap) & info->align_mask;
struct vm_area_struct *vma = NULL;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
unsigned long gap;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
- validate_mm_mt(mm, NULL);
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+ validate_mm_mt(mm);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
rcu_read_lock();
if (mas_get_unmapped_area_rev(&mas, info->low_limit, info->high_limit,
- length))
+ length)) {
+ rcu_read_unlock();
return -ENOMEM;
-
+ }
rcu_read_unlock();
gap = (mas.index + info->align_mask) & ~info->align_mask;
gap -= info->align_offset & info->align_mask;
VM_BUG_ON(gap_end < gap_start);
if (gap != gap_end) {
- struct vm_area_struct *rb_find_vma(struct mm_struct *mm, unsigned long addr);
pr_err("%s: %px Gap was found: mt %lu gap_end %lu\n", __func__,
mm, gap, gap_end);
pr_err("window was %lu - %lu size %lu\n", info->high_limit,
pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index,
info->align_mask, info->align_offset);
- pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index,
- rb_find_vma(mm, mas.index), vma);
+ pr_err("find_vma() at %lu => %px (%px)\n", mas.index,
+ find_vma(mm, mas.index), vma);
+#if defined(CONFIG_DEBUG_MAPLE_TREE)
mt_dump(&mm->mm_mt);
+#endif
{
struct vm_area_struct *dv = mm->mmap;
+
while (dv) {
printk("vma %px %lu-%lu\n", dv, dv->vm_start, dv->vm_end);
dv = dv->vm_next;
vmacache_update(addr, vma);
return vma;
}
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *ret = rb_find_vma(mm, addr);
unsigned long gap_addr;
int error = 0;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
grow = (vma->vm_start - address) >> PAGE_SHIFT;
error = -ENOMEM;
- if (grow > vma->vm_pgoff)
- goto no_update;
+ if (grow <= vma->vm_pgoff) {
+ error = acct_stack_growth(vma, size, grow);
+ if (!error) {
+ /*
+ * vma_gap_update() doesn't support concurrent
+ * updates, but we only hold a shared mmap_lock
+ * lock here, so we need to protect against
+ * concurrent vma expansions.
+ * anon_vma_lock_write() doesn't help here, as
+ * we don't guarantee that all growable vmas
+ * in a mm share the same root anon vma.
+ * So, we reuse mm->page_table_lock to guard
+ * against concurrent vma expansions.
+ */
+ spin_lock(&mm->page_table_lock);
+ if (vma->vm_flags & VM_LOCKED)
+ mm->locked_vm += grow;
+ vm_stat_account(mm, vma->vm_flags, grow);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ /* Overwrite old entry in mtree. */
+ vma_mt_store(mm, vma);
+ anon_vma_interval_tree_post_update_vma(vma);
+ vma_gap_update(vma);
+ spin_unlock(&mm->page_table_lock);
error = acct_stack_growth(vma, size, grow);
if (error)
vma->vm_prev = NULL;
do {
vma_rb_erase(vma, &mm->mm_rb);
- __vma_mt_erase(mm, vma);
+ vma_mt_erase(mm, vma);
mm->map_count--;
tail_vma = vma;
vma = vma->vm_next;
{
struct vm_area_struct *new;
int err;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
if (vma->vm_ops && vma->vm_ops->split) {
err = vma->vm_ops->split(vma, addr);
mpol_put(vma_policy(new));
out_free_vma:
vm_area_free(new);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return err;
}
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
unsigned long mapped_addr;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return 0;
}
cond_resched();
}
- mtree_destroy(&mm->mm_mt);
trace_exit_mmap(mm);
-#if defined(CONFIG_DEBUG_MAPLE_TREE_VERBOSE)
- printk("mt_mod %px, (%px), DESTROY", mm, &mm->mm_mt);
-#endif
-
+ mtree_destroy(&mm->mm_mt);
vm_unacct_memory(nr_accounted);
}
&prev, &rb_link, &rb_parent))
return -ENOMEM;
- if ((overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1)) != NULL) {
+ overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1);
+ if (overlap) {
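+ /* mt_find() advances @start to one past the entry it returns, so
+ * the conflicting VMA ends at start - 1.
+ */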
pr_err("Found vma ending at %lu\n", start - 1);
pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap,
overlap->vm_start, overlap->vm_end - 1);
+#if defined(CONFIG_DEBUG_MAPLE_TREE)
mt_dump(&mm->mm_mt);
+#endif
BUG();
}
bool faulted_in_anon_vma = true;
unsigned long index = addr;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
}
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return new_vma;
out_free_mempol:
out_free_vma:
vm_area_free(new_vma);
out:
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return NULL;
}
int ret;
struct vm_area_struct *vma;
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
perf_event_mmap(vma);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return vma;
out:
vm_area_free(vma);
- validate_mm_mt(mm, NULL);
+ validate_mm_mt(mm);
return ERR_PTR(ret);
}