MA_STATE(mas, &mm->mm_mt, 0, 0);
- mas_for_each(mas, vma, ULONG_MAX) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
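For reference, MA_STATE() declares a struct ma_state value on the stack, so mas_for_each() (and every other mas_* call) needs its address; passing the struct itself does not compile, hence the fix above. A minimal sketch of the iteration pattern this series uses throughout, assuming the caller already holds mmap_lock:

	MA_STATE(mas, &mm->mm_mt, 0, 0);	/* cursor over mm's maple tree */
	struct vm_area_struct *vma;
	unsigned long nr_vmas = 0;

	mas_for_each(&mas, vma, ULONG_MAX)	/* visits VMAs in address order */
		nr_vmas++;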
#ifdef CONFIG_DEBUG_VM_RB
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
- /* Note: mas must be pointing to the expanding VMA */
vma_mas_store(vma, mas);
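vma_mas_store() is the series' helper for writing a VMA back over its own range; its body is not shown here, but a plausible sketch in terms of the generic maple tree API (an assumption, not the authoritative definition) is:

	/* Sketch only: store vma over [vm_start, vm_end - 1] in the tree. */
	static void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
	{
		mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
		mas_store_gfp(mas, vma, GFP_KERNEL);	/* error handling elided */
	}

If the helper aims the cursor itself like this, the removed "mas must be pointing to the expanding VMA" note becomes redundant, which would explain dropping it.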
if (file) {
/* Update high watermark before we lower total_vm */
update_hiwater_vm(mm);
mas_for_each(mas, vma, ULONG_MAX) {
long nrpages = vma_pages(vma);
if (vma->vm_flags & VM_ACCOUNT)
struct maple_tree mt_detach = MTREE_INIT(mt_detach, MAPLE_ALLOC_RANGE);
unsigned long max;
MA_STATE(dst, &mt_detach, start, start);
+ struct ma_state tmp;
/* we have start < vma->vm_end */
/*
if (error)
return error;
prev = vma;
- vma = vma_next(mm, prev);
+ /* The split invalidated the node; reset and re-walk. */
mas->index = start;
mas_reset(mas);
+ vma = mas_walk(mas);
} else {
- prev = vma_prev(mm, vma);
+ tmp = *mas;
+ prev = mas_prev(&tmp, 0);
}
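The tmp dance works because struct ma_state is a plain value type: structure assignment yields an independent cursor at the same tree position, so stepping the copy leaves the original untouched. In short:

	tmp = *mas;			/* snapshot the cursor */
	prev = mas_prev(&tmp, 0);	/* only tmp moves, never below index 0 */
	/* *mas still points at vma for the checks below */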
if (vma->vm_end >= end)
last = vma;
- else
- last = find_vma_intersection(mm, end - 1, end);
+ else {
+ tmp = *mas;
+ tmp.index = tmp.last = end - 1;
+ last = mas_walk(&tmp);
+ }
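For comparison, the find_vma_intersection() being replaced maps directly onto mt_find(), which returns the first entry overlapping [*index, max]; a sketch, assuming mmap_lock is held:

	/* First VMA overlapping [start_addr, end_addr), or NULL. */
	static struct vm_area_struct *vma_intersection(struct mm_struct *mm,
			unsigned long start_addr, unsigned long end_addr)
	{
		unsigned long index = start_addr;

		return mt_find(&mm->mm_mt, &index, end_addr - 1);
	}

With start_addr = end - 1 that degenerates to the point walk used above: the VMA whose range contains end - 1, or NULL if end lands in a gap.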
/* Does it split the last one? */
if (last && end < last->vm_end) {
int error = __split_vma(mm, last, end, 1);
if (error)
return error;
- vma = vma_next(mm, prev);
+ /* The split invalidated the node; reset and re-walk. */
+ mas->index = start;
mas_reset(mas);
+ vma = mas_walk(mas);
}
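Both reset sequences exist because __split_vma() stores into the tree, which can replace nodes and leave a cached ma_state holding a dangling node pointer; the cursor has to be re-aimed and re-walked after every split. mas_set(mas, start) should be equivalent to the open-coded index assignment plus mas_reset() here, since it rewinds index and last and restarts the state in one call:

	/* after __split_vma(): the cached node may be stale */
	mas_set(mas, start);	/* index = last = start, state restarted */
	vma = mas_walk(mas);	/* fresh walk; vma is the VMA covering start */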
unsigned long max = USER_PGTABLES_CEILING;
pgoff_t vm_pgoff;
int error;
- struct ma_state ma_prev;
+ struct ma_state ma_prev, tmp;
MA_STATE(mas, &mm->mm_mt, addr, end - 1);
/* Check against address space limit. */
vm_flags |= VM_ACCOUNT;
}
-
- ma_prev = mas;
+ mas_reset(&mas);
+ mas_set_range(&mas, addr, end - 1);
if (vm_flags & VM_SPECIAL) {
+ ma_prev = mas;
prev = mas_prev(&ma_prev, 0);
goto cannot_expand;
}
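mas_prev(&ma_prev, 0) steps the copied cursor back to the entry preceding addr, never below index 0, and returns NULL when nothing precedes it. Doing this on the copy keeps mas itself parked on [addr, end - 1] for the eventual store:

	ma_prev = mas;			/* copy, so mas stays on [addr, end - 1] */
	prev = mas_prev(&ma_prev, 0);	/* VMA before addr, or NULL */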
/* Attempt to expand an old mapping */
/* Check next */
- next = mas_next(&ma_prev, ULONG_MAX);
+ tmp = mas;
+ next = mas_next(&tmp, ULONG_MAX);
if (next) {
max = next->vm_start;
-
if (next->vm_start == end && vma_policy(next) &&
can_vma_merge_before(next, vm_flags, NULL, file,
pgoff + pglen, NULL_VM_UFFD_CTX)) {
}
/* Check prev */
+ ma_prev = tmp;
prev = mas_prev(&ma_prev, 0);
if (prev && prev->vm_end == addr && !vma_policy(prev) &&
can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
NULL_VM_UFFD_CTX)) {
merge_start = prev->vm_start;
vma = prev;
+ tmp = ma_prev;
vm_pgoff = prev->vm_pgoff;
}
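A worked example of this bookkeeping: with prev spanning [0x1000, 0x3000) and a compatible mapping requested at addr = 0x3000, end = 0x5000, the check above succeeds (prev->vm_end == addr), giving merge_start = 0x1000, merge_end still end = 0x5000, vm_pgoff = prev->vm_pgoff, and tmp (copied from ma_prev) positioned on prev; vma_expand() below then grows prev in place to [0x1000, 0x5000).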
/* Actually expand, if possible */
if (vma &&
- !vma_expand(&ma_prev, vma, merge_start, merge_end, vm_pgoff, next)) {
+ !vma_expand(&tmp, vma, merge_start, merge_end, vm_pgoff, next)) {
khugepaged_enter_vma_merge(prev, vm_flags);
goto expanded;
}
goto free_vma;
}
+ /* Very likely a shorter walk. */
+ mas = ma_prev;
+ mas_set_range(&mas, addr, end - 1);
+ mas_walk(&mas);
vma_mas_link(mm, vma, &mas);
/* Once vma denies write, undo our temporary denial count */
if (file) {
vma->vm_file = NULL;
fput(file);
- mas.index = mas.last = addr;
- vma = mas_walk(&mas);
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, &mas, vma->vm_start, vma->vm_end, prev, max);
charged = 0;
*/
mas_reset(&mas);
mas_set(&mas, 0);
mas_for_each(&mas, vma, ULONG_MAX) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
remove_vma(vma);
}
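mas_for_each() leaves the cursor at the last entry it visited, so every later pass over the tree must rewind the index as well as the node; mas_reset() alone keeps the old position, hence the added mas_set(&mas, 0). (As far as the generic API goes, mas_set() restarts the state too, so the preceding mas_reset() is harmless belt-and-braces.) The two-pass shape, with first_pass()/second_pass() as stand-ins:

	mas_for_each(&mas, vma, ULONG_MAX)
		first_pass(vma);		/* cursor ends at the last VMA */

	mas_set(&mas, 0);			/* rewind to index 0 and restart */
	mas_for_each(&mas, vma, ULONG_MAX)
		second_pass(vma);		/* full walk from the start again */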
mas_reset(&mas);
+ mas_set(&mas, 0);
mas_for_each(&mas, vma, ULONG_MAX) {
if (signal_pending(current))
goto out_unlock;
}
mas_reset(&mas);
+ mas_set(&mas, 0);
mas_for_each(&mas, vma, ULONG_MAX) {
if (signal_pending(current))
goto out_unlock;