struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
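+ /* Maple tree iteration state for mm's VMA tree, starting at index 0 */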
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
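+ /* Walk every VMA stored in the maple tree; the mmap read lock keeps the walk stable */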
+ mas_for_each(&mas, vma, ULONG_MAX) {
unsigned long size = vma->vm_end - vma->vm_start;
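/* Unmap the vvar pages so stale contents are faulted back in on next access */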
if (vma_is_special_mapping(vma, &vvar_mapping))
zap_page_range(vma, vma->vm_start, size);
}
mmap_read_unlock(mm);
return 0;
}
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_write_lock(mm);
/*
* We could search vma near context.vdso, but it's a slowpath,
* so let's explicitly check all VMAs to be completely sure.
*/
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
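+ /* No extra tree lock needed: the mmap write lock already serializes the VMA maple tree */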
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
vma_is_special_mapping(vma, &vvar_mapping)) {
mmap_write_unlock(mm);
return -EEXIST;
}
}
mmap_write_unlock(mm);
return map_vdso(image, addr);