__mmdrop(mm);
}
+void mm_set_in_rcu(struct mm_struct *mm);
+
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
*/
static inline void mmget(struct mm_struct *mm)
{
- mt_set_in_rcu(&mm->mm_mt);
+ if (!mt_in_rcu(&mm->mm_mt))
+ mm_set_in_rcu(mm);
atomic_inc(&mm->mm_users);
}
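For reference, a minimal sketch of the calling pattern the kernel-doc above describes, assuming a hypothetical use_mm_briefly() helper: the caller already holds a valid mm pointer, pins it with mmget() and drops the reference with mmput() once it is done with the address space.

#include <linux/sched/mm.h>

static void use_mm_briefly(struct mm_struct *mm)
{
	mmget(mm);	/* pin: mm_users cannot reach zero while we hold it */
	/* ... operate on the address space ... */
	mmput(mm);	/* unpin: may free the mm if this was the last reference */
}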
* happens, the result would be that the maple tree nodes would remain
* active for an extra RCU read cycle.
*/
- mt_set_in_rcu(&mm->mm_mt);
+ if (!mt_in_rcu(&mm->mm_mt))
+ mm_set_in_rcu(mm);
return atomic_inc_not_zero(&mm->mm_users);
}
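Similarly, a minimal sketch of the mmget_not_zero() pattern, assuming a hypothetical try_use_mm() helper: the pin only succeeds while mm_users has not yet dropped to zero, so a dying address space is never resurrected.

#include <linux/errno.h>
#include <linux/sched/mm.h>

static int try_use_mm(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))
		return -ESRCH;	/* the address space is already being torn down */
	/* ... safe to use the address space here ... */
	mmput(mm);
	return 0;
}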
rb_parent = &tmp->vm_rb;
/* Link the vma into the MT */
- mas_lock(&mas);
mas.index = tmp->vm_start;
mas.last = tmp->vm_end - 1;
mas_store(&mas, tmp);
- mas_unlock(&mas);
mm->map_count++;
if (!(tmp->vm_flags & VM_WIPEONFORK))
return gap;
}
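As an aside, a minimal sketch of the range store performed above, using the higher-level mtree_store_range() helper of the maple tree API instead of a mas_* state (example_mt and index_range() are made up for illustration): a VMA is indexed over [vm_start, vm_end - 1], so every address in that range resolves to the same entry.

#include <linux/gfp.h>
#include <linux/maple_tree.h>

static DEFINE_MTREE(example_mt);

static int index_range(void *obj, unsigned long start, unsigned long end)
{
	/* Store obj over [start, end - 1]; a lookup of any index in the range returns obj. */
	return mtree_store_range(&example_mt, start, end - 1, obj, GFP_KERNEL);
}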
+void mm_set_in_rcu(struct mm_struct *mm)
+{
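+	/* Nothing to do if the tree is already operating in RCU mode. */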
+	if (mt_in_rcu(&mm->mm_mt))
+		return;
+ mmap_write_lock(mm);
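+	/* Switch the tree to RCU mode while holding the mmap write lock. */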
+ mm->mm_mt.ma_flags |= MT_FLAGS_USE_RCU;
+ mmap_write_unlock(mm);
+}
+
#ifdef CONFIG_DEBUG_VM_RB
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
mmap_write_lock(mm);
__mt_destroy(&mm->mm_mt);
mmap_write_unlock(mm);
-
vm_unacct_memory(nr_accounted);
}