	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
+
	mmap_read_lock(mm);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	mas_for_each(&mas, vma, ULONG_MAX) {
		unsigned long size = vma->vm_end - vma->vm_start;
		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}
	mmap_read_unlock(mm);
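The MA_STATE() macro declares an iterator over mm->mm_mt starting at index 0, and mas_for_each() then walks every entry up to the given limit. In include/linux/maple_tree.h the loop is, in essence, just a wrapper around mas_find():

#define mas_for_each(__mas, __entry, __max) \
	while (((__entry) = mas_find((__mas), (__max))) != NULL)

With a limit of ULONG_MAX this visits every VMA in the address space, exactly what the old mm->mmap/vm_next walk did, but by descending the maple tree instead of chasing list pointers.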
The second walk, in map_vdso_once(), is converted the same way, this time under the write lock:

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);

	mmap_write_lock(mm);
	/*
	 * We could search vma near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	mas_for_each(&mas, vma, ULONG_MAX) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
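Note that no mas_lock()/mas_unlock() pair is needed here: the mmap_lock taken above is what protects mm->mm_mt during these walks. The kernel also provides a higher-level wrapper around this pattern, the VMA iterator; purely as a sketch, the same check written with VMA_ITERATOR()/for_each_vma() from linux/mm.h would look like:

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);	/* iterate the maple tree from address 0 */

	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {	/* vma_next() until no VMA remains */
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);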