{
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
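        /* The memslot is backed by the userspace range [hva, reg_end). */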
-       bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;
 
        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
            change != KVM_MR_FLAGS_ONLY)
                return 0;

        mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
-        * between them, so iterate over all of them to find out if we can map
-        * any of them right now.
+        * between them, so iterate over all of them.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma;
-               hva_t vm_start, vm_end;
 
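                /*
                 * find_vma_intersection() returns the first VMA that
                 * overlaps [hva, reg_end), or NULL if the remainder of
                 * the region is unmapped.
                 */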
                vma = find_vma_intersection(current->mm, hva, reg_end);
                if (!vma)
                        break;
 
-               /*
-                * Take the intersection of this VMA with the memory region
-                */
-               vm_start = max(hva, vma->vm_start);
-               vm_end = min(reg_end, vma->vm_end);
-
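                /*
                 * VM_PFNMAP marks raw PFN mappings (typically device or
                 * IO memory) with no struct page backing, so dirty page
                 * logging cannot be supported for them.
                 */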
                if (vma->vm_flags & VM_PFNMAP) {
-                       gpa_t gpa = mem->guest_phys_addr +
-                                   (vm_start - mem->userspace_addr);
-                       phys_addr_t pa;
-
-                       pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
-                       pa += vm_start - vma->vm_start;
-
                        /* IO region dirty page logging not allowed */
                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
-                               goto out;
-                       }
-
-                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
-                                                   vm_end - vm_start,
-                                                   writable);
-                       if (ret)
                                break;
+                       }
                }
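                /*
                 * Step past this VMA; the next find_vma_intersection()
                 * call picks up the following VMA, if any.
                 */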
-               hva = vm_end;
+               hva = min(reg_end, vma->vm_end);
        } while (hva < reg_end);
 
-       if (change == KVM_MR_FLAGS_ONLY)
-               goto out;
-
-       spin_lock(&kvm->mmu_lock);
-       if (ret)
-               unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
-       else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
-               stage2_flush_memslot(kvm, memslot);
-       spin_unlock(&kvm->mmu_lock);
-out:
        mmap_read_unlock(current->mm);
        return ret;
 }