return __split_vma(mm, vma, addr, new_below);
}
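+/*
+ * munmap_sidetree() - Store @vma in the detached tree of VMAs being
+ * removed and, for VM_LOCKED mappings, undo the mlock accounting.
+ */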
+static inline void munmap_sidetree(struct vm_area_struct *vma,
+ struct ma_state *mas_detach)
+{
+ vma_mas_store(vma, mas_detach);
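+ /* The VMA is being unmapped: drop its locked_vm count and unlock its pages. */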
+ if (vma->vm_flags & VM_LOCKED) {
+ vma->vm_mm->locked_vm -= vma_pages(vma);
+ munlock_vma_pages_all(vma);
+ }
+}
+
/*
* do_mas_align_munmap() - munmap the aligned region from @start to @end.
* @mas: The maple_state, ideally set up to alter the correct tree location.
*/
static int
do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
unsigned long end, struct list_head *uf, bool downgrade)
{
- struct vm_area_struct *prev, *next;
+ struct vm_area_struct *prev, *next = NULL;
struct maple_tree mt_detach;
int count = 0;
MA_STATE(mas_detach, &mt_detach, start, end - 1);
mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&mt_detach, &mm->mmap_lock);
mas->last = end - 1;
- prev = next = NULL;
/*
* If we need to split any vma, do it now to save pain later.
*/
mas_for_each(mas, next, end - 1) {
/* Does it split the end? */
if (next->vm_end > end) {
+ struct vm_area_struct *split;
int error;
- struct vm_area_struct *tmp = next;
error = __split_vma(mm, next, end, 1);
if (error)
return error;
mas_set(mas, end);
- next = mas_prev(mas, 0);
- if (tmp == vma)
- vma = next;
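+ /* mas_prev() now returns the lower VMA created by the split; queue it for removal. */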
+ split = mas_prev(mas, 0);
+ munmap_sidetree(split, &mas_detach);
+ count++;
+ if (vma == next)
+ vma = split;
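+ /* This is the last VMA overlapping the range, so stop iterating. */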
+ break;
}
count++;
+ munmap_sidetree(next, &mas_detach);
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
BUG_ON(next->vm_start < start);
BUG_ON(next->vm_start > end);
#endif
- vma_mas_store(next, &mas_detach);
- if (next->vm_flags & VM_LOCKED) {
- mm->locked_vm -= vma_pages(next);
- munlock_vma_pages_all(next);
- }
}
- next = mas_find(mas, ULONG_MAX);
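+ /* If the loop did not break on a split, look up the first VMA after the range. */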
+ if (!next)
+ next = mas_next(mas, ULONG_MAX);
+
if (unlikely(uf)) {
/*
* If userfaultfd_unmap_prep returns an error the vmas