 #define KVM_S2PTE_FLAG_IS_IOMAP        (1UL << 0)
 #define KVM_S2_FLAG_LOGGING_ACTIVE     (1UL << 1)
 
+static bool is_iomap(unsigned long flags)
+{
+       return flags & KVM_S2PTE_FLAG_IS_IOMAP;
+}
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
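
The new is_iomap() helper just names the existing flag test. For context, KVM_S2PTE_FLAG_IS_IOMAP is set further down in user_mem_abort() when the faulting pfn is a device mapping; that code is unchanged by this patch and, going by the surrounding file, looks roughly like this (a sketch for orientation, not part of the diff):

	if (kvm_is_device_pfn(pfn)) {
		/* device pfn: stage-2 device attributes, never a block mapping */
		mem_type = PAGE_S2_DEVICE;
		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
		force_pte = true;
	}

so is_iomap(flags) can be read as "this fault targets device memory".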
 
@@ ... @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	vma_pagesize = vma_kernel_pagesize(vma);
        if (logging_active ||
+           (vma->vm_flags & VM_PFNMAP) ||
            !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
                force_pte = true;
                vma_pagesize = PAGE_SIZE;
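
Adding VM_PFNMAP to the force_pte conditions is the first half of the fix: PFNMAP VMAs (typically a device BAR mmap()ed into the VMM, e.g. through VFIO) have no struct page backing, so the huge-mapping logic driven by vma_kernel_pagesize() must not apply to them; such faults are now always mapped one page at a time.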
@@ ... @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			writable = false;
        }
 
+       if (exec_fault && is_iomap(flags))
+               return -ENOEXEC;
+
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
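
New check: an execution fault on device memory now fails early with -ENOEXEC, before mmu_lock is taken, instead of installing an executable stage-2 mapping over a device region; the caller turns this into a prefetch abort for the guest (see the out: label at the end of the diff).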
@@ ... @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (writable)
                kvm_set_pfn_dirty(pfn);
 
-       if (fault_status != FSC_PERM)
+       if (fault_status != FSC_PERM && !is_iomap(flags))
                clean_dcache_guest_page(pfn, vma_pagesize);
 
        if (exec_fault)
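
Companion change: clean_dcache_guest_page() is now skipped for device mappings. Device memory has no kernel mapping to do d-cache maintenance through, so the existing FSC_PERM test gains an !is_iomap(flags) condition.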
@@ ... @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
-                       kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                       ret = 1;
-                       goto out_unlock;
+                       ret = -ENOEXEC;
+                       goto out;
                }
 
                /*
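
In kvm_handle_guest_abort(), the inline prefetch-abort injection for an instruction abort on an I/O address is replaced by the same -ENOEXEC convention, funnelling through the new out: label below so that this path and user_mem_abort()'s exec-on-device path inject the abort in exactly one place.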
@@ ... @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
+out:
+       if (ret == -ENOEXEC) {
+               kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+               ret = 1;
+       }
 out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
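
Net effect: whether the guest tries to execute from an unmapped I/O address (caught above) or from a mapped device region (caught inside user_mem_abort()), the fault ends up at out:, where kvm_inject_pabt() delivers a prefetch abort to the guest and ret = 1 tells the run loop the exit was handled.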