        return do_vmi_munmap(&vmi, mm, start, len, uf, false);
 }
 
-unsigned long mmap_region(struct file *file, unsigned long addr,
+static unsigned long __mmap_region(struct file *file, unsigned long addr,
                unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
                struct list_head *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        pgoff_t pglen = PHYS_PFN(len);
-       struct vm_area_struct *merge;
        unsigned long charged = 0;
        struct vma_munmap_struct vms;
        struct ma_state mas_detach;
        struct maple_tree mt_detach;
        unsigned long end = addr + len;
-       bool writable_file_mapping = false;
        int error;
        VMA_ITERATOR(vmi, mm, addr);
        VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
        vma = vm_area_alloc(mm);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }

        vma_iter_config(&vmi, addr, end);
        vma_set_range(vma, addr, end, pgoff);
        vm_flags_init(vma, vm_flags);
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
 
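+       /*
+        * Maple tree nodes are pre-allocated before mmap_file() is called,
+        * so an allocation failure surfaces before any driver side effects
+        * exist that would then have to be unwound.
+        */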
+       if (vma_iter_prealloc(&vmi, vma)) {
+               error = -ENOMEM;
+               goto free_vma;
+       }
+
        if (file) {
                vma->vm_file = get_file(file);
                error = mmap_file(file, vma);
                if (error)
-                       goto unmap_and_free_vma;
-
-               if (vma_is_shared_maywrite(vma)) {
-                       error = mapping_map_writable(file->f_mapping);
-                       if (error)
-                               goto close_and_free_vma;
-
-                       writable_file_mapping = true;
-               }
+                       goto unmap_and_free_file_vma;
 
+               /* Drivers cannot alter the address of the VMA. */
+               WARN_ON_ONCE(addr != vma->vm_start);
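+               /*
+                * This was previously a hard -EINVAL failure. A driver that
+                * moves a VMA is buggy, and the old error path could not
+                * sanely undo whatever such a driver had already done, so
+                * the case is now reported as a one-shot warning instead.
+                */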
                /*
-                * Expansion is handled above, merging is handled below.
-                * Drivers should not alter the address of the VMA.
+                * Drivers should not permit writability when it was
+                * previously disallowed.
                 */
-               if (WARN_ON((addr != vma->vm_start))) {
-                       error = -EINVAL;
-                       goto close_and_free_vma;
-               }
+               VM_WARN_ON_ONCE(vm_flags != vma->vm_flags &&
+                               !(vm_flags & VM_MAYWRITE) &&
+                               (vma->vm_flags & VM_MAYWRITE));
 
                vma_iter_config(&vmi, addr, end);
                /*
                 * If vm_flags changed after mmap_file(), we should try merge
                 * vma again as we may succeed this time.
                 */
                if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
+                       struct vm_area_struct *merge;
+
                        vmg.flags = vma->vm_flags;
                        /* If this fails, state is reset ready for a reattempt. */
                        merge = vma_merge_new_range(&vmg);
                        if (merge) {
                                /*
                                 * ->mmap() can change vma->vm_file and fput
                                 * the original file. So fput the vma->vm_file
                                 * here, or we would add an extra fput for the
                                 * file and ultimately cause a general
                                 * protection fault.
                                 */
                                fput(vma->vm_file);
                                vm_area_free(vma);
                                vma = merge;
                                /* Update vm_flags to pick up the change. */
                                vm_flags = vma->vm_flags;
-                               goto unmap_writable;
+                               goto file_expanded;
                        }
                        vma_iter_config(&vmi, addr, end);
                }
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
-                       goto free_vma;
+                       goto free_iter_vma;
        } else {
                vma_set_anonymous(vma);
        }
 
-       if (map_deny_write_exec(vma->vm_flags, vma->vm_flags)) {
-               error = -EACCES;
-               goto close_and_free_vma;
-       }
-
-       /* Allow architectures to sanity-check the vm_flags */
-       if (!arch_validate_flags(vma->vm_flags)) {
-               error = -EINVAL;
-               goto close_and_free_vma;
-       }
-
-       if (vma_iter_prealloc(&vmi, vma)) {
-               error = -ENOMEM;
-               goto close_and_free_vma;
-       }
+#ifdef CONFIG_SPARC64
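+       /*
+        * arch_validate_flags() is otherwise checked in mmap_region() before
+        * mmap_file() can run, but the flags sparc's ADI support validates
+        * may still be changed by the ->mmap() hook, so re-check here as a
+        * warning only until sparc is fixed.
+        */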
+       /* TODO: Fix SPARC ADI! */
+       WARN_ON_ONCE(!arch_validate_flags(vm_flags));
+#endif
 
        /* Lock the VMA since it is modified after insertion into VMA tree */
        vma_start_write(vma);
        vma_iter_store(&vmi, vma);
        mm->map_count++;
        vma_link_file(vma);

        /*
         * vma_merge_new_range() calls khugepaged_enter_vma() too; the call
         * below covers the non-merge case.
         */
        khugepaged_enter_vma(vma, vma->vm_flags);
 
-       /* Once vma denies write, undo our temporary denial count */
-unmap_writable:
-       if (writable_file_mapping)
-               mapping_unmap_writable(file->f_mapping);
+file_expanded:
        file = vma->vm_file;
        ksm_add_vma(vma);
 expanded:
 
        vma_set_page_prot(vma);
 
-       validate_mm(mm);
        return addr;
 
-close_and_free_vma:
-       vma_close(vma);
-
-       if (file || vma->vm_file) {
-unmap_and_free_vma:
-               fput(vma->vm_file);
-               vma->vm_file = NULL;
+unmap_and_free_file_vma:
+       fput(vma->vm_file);
+       vma->vm_file = NULL;
 
-               vma_iter_set(&vmi, vma->vm_end);
-               /* Undo any partial mapping done by a device driver. */
-               unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
-       }
-       if (writable_file_mapping)
-               mapping_unmap_writable(file->f_mapping);
+       vma_iter_set(&vmi, vma->vm_end);
+       /* Undo any partial mapping done by a device driver. */
+       unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
+free_iter_vma:
+       vma_iter_free(&vmi);
 free_vma:
        vm_area_free(vma);
unacct_error:
        if (charged)
                vm_unacct_memory(charged);

abort_munmap:
        vms_abort_munmap_vmas(&vms, &mas_detach);
 gather_failed:
-       validate_mm(mm);
        return error;
 }
 
+unsigned long mmap_region(struct file *file, unsigned long addr,
+                         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+                         struct list_head *uf)
+{
+       unsigned long ret;
+       bool writable_file_mapping = false;
+
+       /* Check to see if MDWE is applicable. */
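+       /*
+        * A brand-new mapping has no previous flags, so vm_flags is passed
+        * as both the old and the new value.
+        */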
+       if (map_deny_write_exec(vm_flags, vm_flags))
+               return -EACCES;
+
+       /* Allow architectures to sanity-check the vm_flags. */
+       if (!arch_validate_flags(vm_flags))
+               return -EINVAL;
+
+       /* Map writable and ensure this isn't a sealed memfd. */
+       if (file && is_shared_maywrite(vm_flags)) {
+               int error = mapping_map_writable(file->f_mapping);
+
+               if (error)
+                       return error;
+               writable_file_mapping = true;
+       }
+
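+       /*
+        * The writable reference is held across __mmap_region() so that its
+        * error paths never need to release it; it is dropped exactly once
+        * below, whether the mapping succeeded or failed.
+        */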
+       ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
+
+       /* Clear our write mapping regardless of error. */
+       if (writable_file_mapping)
+               mapping_unmap_writable(file->f_mapping);
+
+       validate_mm(current->mm);
+       return ret;
+}
+
 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
 {
        int ret;