static pgd_t *tboot_pg_dir;
static struct mm_struct tboot_mm = {
- .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
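+ /* mm_mt is guarded by the maple tree's internal spinlock now, not mmap_lock. */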
+ .mm_mt = MTREE_INIT(mm_mt, MM_MT_FLAGS),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
extern unsigned long screen_info_table;
struct mm_struct efi_mm = {
- .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
+ .mm_mt = MTREE_INIT(mm_mt, MM_MT_FLAGS),
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
{
vmi->mas.index = vma->vm_start;
vmi->mas.last = vma->vm_end - 1;
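+ /* The tree lock is no longer mmap_lock; take the internal spinlock for the store. */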
+ mas_lock(&vmi->mas);
mas_store(&vmi->mas, vma);
+ mas_unlock(&vmi->mas);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
unsigned long cpu_bitmap[];
};
-#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
- MT_FLAGS_USE_RCU)
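+/* Without MT_FLAGS_LOCK_EXTERN the maple tree is protected by its own internal spinlock. */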
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
mm->stack_vm = oldmm->stack_vm;
/* Use __mt_dup() to efficiently build an identical maple tree. */
- retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
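+ /*
+  * Hold the destination tree's spinlock and an RCU read section for the
+  * source walk across the duplication; allocations here must not sleep.
+  */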
+ mas_lock(&vmi.mas);
+ rcu_read_lock();
+ retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_NOWAIT);
+ rcu_read_unlock();
+ mas_unlock(&vmi.mas);
if (unlikely(retval))
goto out;
struct user_namespace *user_ns)
{
mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
- mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
seqcount_init(&mm->write_protect_seq);
* and size this cpu_bitmask to NR_CPUS.
*/
struct mm_struct init_mm = {
- .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
+ .mm_mt = MTREE_INIT(mm_mt, MM_MT_FLAGS),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
/* Can happen if dup_mmap() received an OOM */
mmap_read_unlock(mm);
mmap_write_lock(mm);
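+ /* The destroy path drops the tree lock, so it must be entered with the lock held. */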
+ mas_lock(&vmi.mas);
goto destroy;
}
*/
set_bit(MMF_OOM_SKIP, &mm->flags);
mmap_write_lock(mm);
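+ /* The tree spinlock nests inside mmap_lock and is held until __mt_destroy(). */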
+ mas_lock(&vmi.mas);
mt_clear_in_rcu(&mm->mm_mt);
vma_iter_set(&vmi, vma->vm_end);
free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
trace_exit_mmap(mm);
destroy:
__mt_destroy(&mm->mm_mt);
+ mas_unlock(&vmi.mas);
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
}
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
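+ /* The tree no longer shares mmap_lock, so walk it under RCU. */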
+ rcu_read_lock();
mt_validate(&mm->mm_mt);
for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
break;
}
}
+ rcu_read_unlock();
+
if (i != mm->map_count) {
pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
bug = 1;
struct vm_area_struct *vma, gfp_t gfp)
{
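+ /*
+  * Store under the internal tree lock; mas_store_gfp() drops and retakes
+  * it itself if it has to sleep for an allocation.
+  */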
+ mas_lock(&vmi->mas);
if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_gfp(&vmi->mas, vma, gfp);
+ mas_unlock(&vmi->mas);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
- return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
+ int ret;
+
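+ /* Preallocation runs under the tree spinlock, so it must not sleep. */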
+ mas_lock(&vmi->mas);
+ ret = mas_preallocate(&vmi->mas, vma, GFP_NOWAIT);
+ mas_unlock(&vmi->mas);
+ return ret;
}
static inline void vma_iter_clear(struct vma_iterator *vmi)
{
+ mas_lock(&vmi->mas);
mas_store_prealloc(&vmi->mas, NULL);
+ mas_unlock(&vmi->mas);
}
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
struct vm_area_struct *vma)
{
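+ /* Consumes nodes preallocated by vma_iter_prealloc(); no allocation under the lock. */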
+ mas_lock(&vmi->mas);
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.index > vma->vm_start)) {
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
+ mas_unlock(&vmi->mas);
}
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)