@@ ... @@
 static inline void thp_split_mm(struct mm_struct *mm)
 {
         struct vm_area_struct *vma;
+        MA_STATE(mas, &mm->mm_mt, 0, 0);
 
-        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+        mas_for_each(&mas, vma, ULONG_MAX) {
                 vma->vm_flags &= ~VM_HUGEPAGE;
                 vma->vm_flags |= VM_NOHUGEPAGE;
                 walk_page_vma(vma, &thp_split_walk_ops, NULL);
@@ ... @@ int gmap_mark_unmergeable(void)
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
         int ret;
+        MA_STATE(mas, &mm->mm_mt, 0, 0);
 
-        for (vma = mm->mmap; vma; vma = vma->vm_next) {
+        mas_for_each(&mas, vma, ULONG_MAX) {
                 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                   MADV_UNMERGEABLE, &vma->vm_flags);
                 if (ret)
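
Both hunks make the same substitution: the open-coded walk of the mm->mmap/vm_next linked list becomes an iteration over the maple tree rooted at mm->mm_mt. As a minimal, self-contained sketch of that pattern (the helper name count_vmas() and the lock assertion are illustrative, not part of the patch), a maple-tree VMA walk looks like this:

#include <linux/mm.h>	/* struct mm_struct, mmap_lock helpers, maple-tree iterator macros */

/* Illustrative helper, not from the patch: count the VMAs of an mm by
 * walking the maple tree the same way the hunks above do. */
static unsigned long count_vmas(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long nr = 0;
        MA_STATE(mas, &mm->mm_mt, 0, 0);        /* iterator over mm->mm_mt, starting at index 0 */

        mmap_assert_locked(mm);                 /* a VMA walk still requires mmap_lock, as before */
        mas_for_each(&mas, vma, ULONG_MAX)      /* yields every VMA up to the last possible index */
                nr++;
        return nr;
}

The VMA_ITERATOR()/for_each_vma() helpers that later kernels provide wrap this same walk over mm->mm_mt.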