static pgd_t *tboot_pg_dir;
static struct mm_struct tboot_mm = {
.mm_rb = RB_ROOT,
- .mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
- .mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
unsigned long cpu_bitmap[];
};
+/*
+ * Flags for an mm_struct's maple tree of VMAs: allocation-range mode, with
+ * locking handled externally (the tree is protected by mmap_lock rather
+ * than the maple tree's internal spinlock — see the MTREE_INIT_EXT() users
+ * that pass ...mmap_lock, and mt_set_external_lock() in mm_init).
+ */
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
{
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;
- mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
+ mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
+ mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
*/
struct mm_struct init_mm = {
.mm_rb = RB_ROOT,
- .mm_mt = MTREE_INIT(mm_mt, MAPLE_ALLOC_RANGE),
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
#endif
mas->index = vma->vm_start;
mas->last = vma->vm_end - 1;
- mas_lock(mas);
ret = mas_store_gfp(mas, vma, GFP_KERNEL);
- mas_unlock(mas);
return ret;
}
#endif
mas->index = vma->vm_start;
mas->last = vma->vm_end - 1;
- mas_lock(mas);
ret = mas_store_gfp(mas, NULL, GFP_KERNEL);
- mas_unlock(mas);
return ret;
}
/*
 * vma_mt_szero() - Clear the VMA entry covering [start, end) from the
 * mm's maple tree.
 *
 * Switches from mtree_store_range() (which takes the tree's internal lock)
 * to a stack-local MA_STATE + mas_erase(), since the tree now uses an
 * external lock (mmap_lock).
 *
 * NOTE(review): mas_erase() erases the entry found at mas.index, while the
 * removed mtree_store_range(..., NULL, ...) wrote NULL across the whole
 * [start, end - 1] span — confirm the range is always covered by a single
 * entry here, otherwise these are not equivalent.
 */
static inline void vma_mt_szero(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
+ MA_STATE(mas, &mm->mm_mt, start, end - 1);
+
trace_vma_mt_szero(mm, start, end);
- mtree_store_range(&mm->mm_mt, start, end - 1, NULL, GFP_KERNEL);
+ mas_erase(&mas);
}
/*
/*
 * vma_mt_store() - Store @vma in the mm's maple tree over the range
 * [vma->vm_start, vma->vm_end - 1].
 *
 * Replaces mtree_store_range() with a stack-local MA_STATE + mas_store_gfp()
 * so the operation runs under the externally held mmap_lock instead of the
 * maple tree's internal lock.
 *
 * NOTE(review): the mas_store_gfp() return value is discarded here — an
 * allocation failure (-ENOMEM) would leave the tree out of sync with the
 * VMA; callers presumably cannot recover at this point, but confirm.
 */
static inline
void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
{
+ MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);
+
trace_vma_mt_store(mm, vma);
- mtree_store_range(&mm->mm_mt, vma->vm_start, vma->vm_end - 1, vma,
- GFP_KERNEL);
+ mas_store_gfp(&mas, vma, GFP_KERNEL);
}
void vma_store(struct mm_struct *mm, struct vm_area_struct *vma) {
}
trace_exit_mmap(mm);
- mtree_destroy(&mm->mm_mt);
+ /* Take the mmap lock to satisfy lockdep. Nobody else can see it */
+ mmap_write_lock(mm);
+ __mt_destroy(&mm->mm_mt);
+ mmap_write_unlock(mm);
+
vm_unacct_memory(nr_accounted);
}