struct mm_slot, mm_list);
spin_unlock(&ksm_mmlist_lock);
- for (mm_slot = ksm_scan.mm_slot;
- mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
+ for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
+ mm_slot = ksm_scan.mm_slot) {
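+		/* Maple tree state for walking this mm's VMAs, starting at address 0 */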
+ MA_STATE(mas, &mm_slot->mm->mm_mt, 0, 0);
+
mm = mm_slot->mm;
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ rcu_read_lock();
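+		/* Under RCU, mas_for_each() visits every VMA up to ULONG_MAX */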
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (ksm_test_exit(mm))
break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}
+		rcu_read_unlock();
		remove_trailing_rmap_items(&mm_slot->rmap_list);
mmap_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
return 0;
error:
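+	/* Error unwind: leave the RCU read section before dropping the mmap lock */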
+ rcu_read_unlock();
mmap_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = &ksm_mm_head;
struct vm_area_struct *vma;
struct rmap_item *rmap_item;
int nid;
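+	/* Maple state starts with a NULL tree; it is pointed at the mm below */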
+ MA_STATE(mas, NULL, 0, 0);
if (list_empty(&ksm_mm_head.mm_list))
return NULL;
}
mm = slot->mm;
+ mas.tree = &mm->mm_mt;
+
mmap_read_lock(mm);
+ rcu_read_lock();
if (ksm_test_exit(mm))
- vma = NULL;
- else
- vma = find_vma(mm, ksm_scan.address);
+ goto no_vmas;
- for (; vma; vma = vma->vm_next) {
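+	/* Reposition the iterator to resume at the last scanned address */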
+ mas_set(&mas, ksm_scan.address);
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (!(vma->vm_flags & VM_MERGEABLE))
continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
			if (rmap_item) {
				ksm_scan.rmap_list = &rmap_item->rmap_list;
				ksm_scan.address += PAGE_SIZE;
			} else
				put_page(*page);
+ rcu_read_unlock();
mmap_read_unlock(mm);
return rmap_item;
}
}
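+	/* Whether the walk completed or the mm is exiting, finish this mm's scan */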
if (ksm_test_exit(mm)) {
+no_vmas:
ksm_scan.address = 0;
ksm_scan.rmap_list = &slot->rmap_list;
}
free_mm_slot(slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ rcu_read_unlock();
mmap_read_unlock(mm);
mmdrop(mm);
} else {
+ rcu_read_unlock();
mmap_read_unlock(mm);
/*
* mmap_read_unlock(mm) first because after