struct vm_area_struct *vma, *next, *prev, *last;
struct vm_area_struct start_split, end_split;
int map_count = 0;
- //MA_STATE(mas, &mm->mm_mt, start, start);
+ MA_STATE(mas, &mm->mm_mt, start, start);
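+ /*
+ * mas is an on-stack maple tree state for mm->mm_mt, positioned at start;
+ * the VMA lookups and the range erase below all reuse it.
+ */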
if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE - start)
return -EINVAL;

arch_unmap(mm, start, end);
/* Find the first overlapping VMA */
- vma = find_vma_intersection(mm, start, end);
+ vma = mas_find(&mas, end - 1);
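+ /*
+ * mas_find() walks from start and returns the first VMA with any part in
+ * [start, end - 1], leaving mas positioned on it for the later calls.
+ */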
if (!vma)
return 0;
+ /* Check for userfaultfd now before altering the mm */
if (unlikely(uf)) {
int error = userfaultfd_unmap_prep(vma, start, end, uf);

if (error)
return error;
}

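+ /*
+ * userfaultfd_unmap_prep() only queues the range on the uf list here; the
+ * UNMAP events are delivered by userfaultfd_unmap_complete() after the
+ * lock is dropped.
+ */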
if (start > vma->vm_start) {
if (unlikely(vma->vm_end > end)) {
/* Adjusting both ends of the same vma requires a split. */
- int error = __split_vma(mm, vma, start, 0);
+ int error;
+ /*
+ * Make sure that map_count on return from munmap() will
+ * not exceed its limit; but let map_count go just above
+ * its limit temporarily, to help free resources as expected.
+ */
+ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ error = __split_vma(mm, vma, start, 0);
if (error)
return error;
+
prev = vma;
- vma = vma_next(mm, prev);
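+ /*
+ * __split_vma(..., 0) keeps [vm_start, start) in the old VMA and creates a
+ * new VMA for [start, vm_end); reset and re-walk the tree at start to pick
+ * up the new VMA.
+ */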
+ mas_reset(&mas);
+ mas_set(&mas, start);
+ vma = mas_walk(&mas);
} else {
vma_shorten(vma, vma->vm_start, start, &start_split);
prev = vma;
last = &end_split;
}
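+ /*
+ * start_split/end_split are stack-local copies: vma_shorten() is assumed to
+ * trim the real VMA in place and describe the cut-off span in the copy, so
+ * that span can still be passed to unmap_region() and accounted for by hand
+ * below.
+ */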
-
-
- /*
- * unlock any mlock()ed ranges before detaching vmas
+ /* unlock any mlock()ed VMAs and count the number of VMAs to be
+ * detached.
*/
next = vma;
while (next && next->vm_start < end) {
+ map_count++;
if (next->vm_flags & VM_LOCKED) {
mm->locked_vm -= vma_pages(next);
munlock_vma_pages_all(next);
}
next = next->vm_next;
}
- //printk("tmp %px, map count is %d\n", tmp, map_count);
-
- /* Detach vmas from the MM linked list and remove from the mm tree*/
+ /* Detach vmas from the MM linked list */
vma->vm_prev = NULL;
if (prev)
prev->vm_next = next;
else
mm->mmap = next;
if (next)
next->vm_prev = prev;
last = vma;
last->vm_next = NULL;
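+ /*
+ * The detached VMAs are no longer reachable through mm's VMA list;
+ * unmap_region() and remove_vma_list() below operate on this private chain.
+ */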
+
+ /* Detach VMAs from the maple tree */
+ mas.index = start;
+ mas.last = end - 1;
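+ /*
+ * Storing NULL over [start, end - 1] clears every VMA pointer in the range
+ * with a single tree write; GFP_KERNEL covers any node allocation the erase
+ * may need.
+ */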
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+
+ /* Update map_count */
mm->map_count -= map_count;
- vma_mt_szero(mm, start, end);
+ /* Downgrade the lock, if possible */
if (next && (next->vm_flags & VM_GROWSDOWN))
downgrade = false;
else if (prev && (prev->vm_flags & VM_GROWSUP))
downgrade = false;
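+ /*
+ * A neighbouring VM_GROWSDOWN/VM_GROWSUP VMA can expand under the read lock
+ * and collide with the range being freed, so keep the write lock in that
+ * case.
+ */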
-
if (downgrade)
mmap_write_downgrade(mm);
- /* vma -> last is a separate lined list. Add start_split and end_split
- * if necessary */
+ /* Actually unmap the region */
unmap_region(mm, vma, prev, start, end);
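+ /*
+ * unmap_region() zaps the page range and frees the page tables for the
+ * detached chain; if the lock was downgraded, this runs under mmap_lock
+ * held for read.
+ */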
+ /* Take care of accounting for orphan VMAs, and remove from the list. */
if (vma == &start_split) {
if (vma->vm_flags & VM_ACCOUNT) {
long nrpages = vma_pages(vma);

vm_unacct_memory(nrpages);
}
vma = vma->vm_next;
}
- // Cleanup accounting.
if (last == &end_split) {
if (last->vm_flags & VM_ACCOUNT) {
long nrpages = vma_pages(last);

vm_unacct_memory(nrpages);
}
if (vma == &end_split)
vma = NULL;
}
- /* Fix up all other VM information */
+ /* Clean up accounting and free VMAs */
if (vma)
remove_vma_list(mm, vma);