for_each_vma_range(*vmi, next, end) {
                /* Does it split the end? */
                if (next->vm_end > end) {
-                       struct vm_area_struct *split;
-
-                       error = __split_vma(vmi, next, end, 1);
+                       error = __split_vma(vmi, next, end, 0);
                        if (error)
                                goto end_split_failed;
-
-                       split = vma_prev(vmi);
-                       error = munmap_sidetree(split, &mas_detach);
-                       if (error)
-                               goto munmap_sidetree_failed;
-
-                       count++;
-                       if (vma == next)
-                               vma = split;
-                       break;
                }
                error = munmap_sidetree(next, &mas_detach);
                if (error)
                        goto munmap_sidetree_failed;

                count++;
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
                BUG_ON(next->vm_start < start);
                BUG_ON(next->vm_start > end);
 #endif
        }
 
-       if (!next)
-               next = vma_next(vmi);
-
+       next = vma_next(vmi);
        if (unlikely(uf)) {
                /*
                 * If userfaultfd_unmap_prep returns an error the vmas
 
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
-       unsigned long excess = 0;
+       unsigned long account_start = 0;
+       unsigned long account_end = 0;
        unsigned long hiwater_vm;
-       int split = 0;
        int err = 0;
        bool need_rmap_locks;
+       struct vma_iterator vmi;
 
        /*
         * We'd prefer to avoid failure later on in do_munmap:
        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
                vma->vm_flags &= ~VM_ACCOUNT;
-               excess = vma->vm_end - vma->vm_start - old_len;
-               if (old_addr > vma->vm_start &&
-                   old_addr + old_len < vma->vm_end)
-                       split = 1;
+               if (vma->vm_start < old_addr)
+                       account_start = vma->vm_start;
+               if (vma->vm_end > old_addr + old_len)
+                       account_end = vma->vm_end;
        }
 
        /*
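In the hunk above, the old bookkeeping kept one `excess` byte count plus a `split` flag and later restored VM_ACCOUNT on `vma` (and, when split, on its neighbour found with find_vma()); the replacement records the two sides independently: `account_start` is set when part of the old VMA survives below `old_addr`, `account_end` when part survives above `old_addr + old_len`. Below is a minimal userspace sketch of just that decision, assuming nothing else about move_vma(); `plan_account()` is a hypothetical helper used only for illustration.

/*
 * Minimal userspace sketch (not kernel code) of the new bookkeeping:
 * rather than one "excess" byte count plus a "split" flag, note separately
 * whether a piece of the old VMA will remain below old_addr and/or above
 * old_addr + old_len.
 */
#include <stdio.h>

static void plan_account(unsigned long vm_start, unsigned long vm_end,
			 unsigned long old_addr, unsigned long old_len,
			 unsigned long *account_start, unsigned long *account_end)
{
	*account_start = 0;
	*account_end = 0;
	if (vm_start < old_addr)		/* remnant below the moved range */
		*account_start = vm_start;
	if (vm_end > old_addr + old_len)	/* remnant above the moved range */
		*account_end = vm_end;
}

int main(void)
{
	unsigned long account_start, account_end;

	/* VMA [0x1000, 0x9000); mremap() moves [0x3000, 0x5000) out of it */
	plan_account(0x1000, 0x9000, 0x3000, 0x2000, &account_start, &account_end);
	printf("restore below: %s, restore above: %s\n",
	       account_start ? "yes" : "no", account_end ? "yes" : "no");
	return 0;
}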
                return new_addr;
        }
 
-       if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
+       vma_iter_init(&vmi, mm, old_addr);
+       if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
                        vm_acct_memory(old_len >> PAGE_SHIFT);
-               excess = 0;
+               account_start = account_end = 0;
        }
 
        if (vm_flags & VM_LOCKED) {
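Replacing do_munmap() with vma_iter_init() plus do_vmi_munmap() hands the unmap a caller-owned vma_iterator positioned at old_addr (the trailing false declines the optional mmap-lock downgrade), and that iterator is what the VM_ACCOUNT restore further down reuses. On the OOM path nothing was unmapped, so the accounting hidden by clearing VM_ACCOUNT is charged back and the planned restore is cancelled. A small illustrative model of that fallback follows; `committed` and vm_acct_memory_model() are stand-ins for the kernel's vm_committed_as accounting, and PAGE_SHIFT is assumed to be 12.

/*
 * Illustrative userspace model of the OOM fallback above, not kernel code:
 * if the unmap could not split the VMA, nothing was removed, so the pages
 * hidden by clearing VM_ACCOUNT are charged back and the planned flag
 * restore is cancelled.  PAGE_SHIFT of 12 (4 KiB pages) is assumed.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static long committed;			/* stand-in for vm_committed_as */

static void vm_acct_memory_model(long pages)
{
	committed += pages;
}

int main(void)
{
	unsigned long old_len = 0x6000;	/* bytes that stayed mapped */
	unsigned long account_start = 0x1000;
	unsigned long account_end = 0x9000;

	vm_acct_memory_model(old_len >> PAGE_SHIFT);	/* charge 6 pages back */
	account_start = account_end = 0;		/* no remnants to fix up */

	printf("committed pages now %ld, restore planned: %s\n",
	       committed, (account_start || account_end) ? "yes" : "no");
	return 0;
}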
        mm->hiwater_vm = hiwater_vm;
 
        /* Restore VM_ACCOUNT if one or two pieces of vma left */
-       if (excess) {
+       if (account_start) {
+               vma = vma_prev(&vmi);
+               vma->vm_flags |= VM_ACCOUNT;
+       }
+
+       if (account_end) {
+               vma = vma_next(&vmi);
                vma->vm_flags |= VM_ACCOUNT;
-               if (split)
-                       find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
        }
 
        return new_addr;
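The restore step is where the iterator pays off: after do_vmi_munmap() has removed [old_addr, old_addr + old_len), vma_prev(&vmi) picks up the remnant left below that hole and vma_next(&vmi) the remnant above it, so the old excess/split arithmetic and the extra find_vma() lookup disappear. The sketch below models that "iterator parked at the hole" pattern in plain C; struct vmi_model and its helpers are illustrative stand-ins for the vma_iterator API, not the real thing.

/*
 * Minimal userspace model (not kernel code) of "iterator parked at the
 * hole": after removing the middle of a mapping, prev() returns the piece
 * left below and next() the piece left above.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

struct vmi_model {
	struct range vmas[8];
	int count;
	int pos;		/* index of the hole after the unmap */
};

static struct range *model_prev(struct vmi_model *vmi)
{
	return vmi->pos > 0 ? &vmi->vmas[vmi->pos - 1] : NULL;
}

static struct range *model_next(struct vmi_model *vmi)
{
	return vmi->pos < vmi->count ? &vmi->vmas[vmi->pos] : NULL;
}

int main(void)
{
	/* old VMA [0x1000, 0x9000); [0x3000, 0x5000) was unmapped, leaving
	 * a lower remnant, a hole, and an upper remnant.
	 */
	struct vmi_model vmi = {
		.vmas = { { 0x1000, 0x3000 }, { 0x5000, 0x9000 } },
		.count = 2,
		.pos = 1,	/* parked at the hole between the two pieces */
	};
	struct range *below = model_prev(&vmi);
	struct range *above = model_next(&vmi);

	if (below)	/* account_start case: re-mark the lower remnant */
		printf("restore VM_ACCOUNT on [%#lx, %#lx)\n", below->start, below->end);
	if (above)	/* account_end case: re-mark the upper remnant */
		printf("restore VM_ACCOUNT on [%#lx, %#lx)\n", above->start, above->end);
	return 0;
}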