--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+       select HAVE_KVM_VCPU_ASYNC_IOCTL
        select KVM_MMIO
        select MMU_NOTIFIER
        select SRCU
 
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
        return r;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
-                        unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
+                              unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       long r;
 
        if (ioctl == KVM_INTERRUPT) {
                struct kvm_mips_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;
                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }
 
+       return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+                        unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
        vcpu_load(vcpu);
 
        switch (ioctl) {
 
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_EVENTFD
+       select HAVE_KVM_VCPU_ASYNC_IOCTL
        select SRCU
        select KVM_VFIO
        select IRQ_BYPASS_MANAGER
 
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
        return -EINVAL;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp,
-                         unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       long r;
 
        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;
                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }
+       return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                         unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
 
        vcpu_load(vcpu);
 
 
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_CPU_RELAX_INTERCEPT
+       select HAVE_KVM_VCPU_ASYNC_IOCTL
        select HAVE_KVM_EVENTFD
        select KVM_ASYNC_PF
        select KVM_ASYNC_PF_SYNC
 
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
        return r;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp,
-                        unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       int idx;
-       long r;
 
        switch (ioctl) {
        case KVM_S390_IRQ: {
                struct kvm_s390_irq s390irq;

                if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390irq);
        }
        }
+       return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       int idx;
+       long r;
 
        vcpu_load(vcpu);
 
 
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
 }
 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
 
+#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
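+/*
+ * Handle vcpu ioctls that are asynchronous to vcpu execution and therefore
+ * must not take the vcpu mutex.  Returning -ENOIOCTLCMD makes the caller
+ * fall back to the serialized kvm_arch_vcpu_ioctl() path.
+ */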
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg);
+#else
+static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                                            unsigned int ioctl,
+                                            unsigned long arg)
+{
+       return -ENOIOCTLCMD;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+
 #endif
 
 
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
config HAVE_KVM_IRQ_BYPASS
        bool
+
+config HAVE_KVM_VCPU_ASYNC_IOCTL
+       bool
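+       help
+         Selected by architectures that implement kvm_arch_vcpu_async_ioctl()
+         to handle vcpu ioctls without taking the vcpu mutex.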
 
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
        if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
                return -EINVAL;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
-        * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
-        * so vcpu_load() would break it.
+        * Some architectures have vcpu ioctls that are asynchronous to vcpu
+        * execution; mutex_lock() would break them.
         */
-       if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
-               return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
-#endif
-
+       r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+       if (r != -ENOIOCTLCMD)
+               return r;
 
        if (mutex_lock_killable(&vcpu->mutex))
                return -EINTR;
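
Taken together, an architecture opts in by selecting HAVE_KVM_VCPU_ASYNC_IOCTL
from its KVM Kconfig entry and supplying the hook; anything the hook does not
recognize falls through to the locked kvm_arch_vcpu_ioctl() path. A minimal
sketch of such a conversion, modeled on the powerpc hunk above (the arch is
unnamed and kvm_vcpu_ioctl_interrupt() stands in for whatever arch-local
injection helper applies):

long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        /* Only ioctls that are safe without the vcpu mutex belong here. */
        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;
                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }

        /* Everything else is dispatched to kvm_arch_vcpu_ioctl(). */
        return -ENOIOCTLCMD;
}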