KVM handlers should exit to userspace with rc = -EREMOTE.
 
+7.5 KVM_CAP_SPLIT_IRQCHIP
+
+Architectures: x86
+Parameters: None
+Returns: 0 on success, -1 on error
+
+Create a local APIC for each processor in the kernel. This can be used
+instead of KVM_CREATE_IRQCHIP if the userspace VMM wishes to emulate the
+IOAPIC and PIC (and also the PIT, even though this has to be enabled
+separately).
+
+This supersedes KVM_CREATE_IRQCHIP, creating only local APICs, but no in-kernel
+IOAPIC or PIC. This capability also enables in-kernel routing of interrupt
+requests.
+
+Fails if a VCPU has already been created, or if the irqchip is already in the
+kernel (i.e. KVM_CREATE_IRQCHIP has already been called).
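+
+As an illustrative sketch (not part of the ABI description above), a userspace
+VMM could enable this capability on its VM file descriptor roughly as follows,
+assuming vm_fd was obtained from KVM_CREATE_VM and that <linux/kvm.h>,
+<sys/ioctl.h> and <stdio.h> are included:
+
+	struct kvm_enable_cap cap = {
+		.cap = KVM_CAP_SPLIT_IRQCHIP,
+	};
+
+	/* Must be enabled before any VCPU is created. */
+	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+		perror("KVM_ENABLE_CAP(KVM_CAP_SPLIT_IRQCHIP)");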
+
 
 8. Other capabilities.
 ----------------------
 
        u32 bsp_vcpu_id;
 
        u64 disabled_quirks;
+
+       bool irqchip_split;
 };
 
 struct kvm_vm_stat {
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
 
+#include "ioapic.h"
 #include "irq.h"
 #include "i8254.h"
 #include "x86.h"
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
        s64 interval;
 
-       if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
+       if (!ioapic_in_kernel(kvm) ||
+           ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
                return;
 
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
        return kvm->arch.vioapic;
 }
 
+static inline int ioapic_in_kernel(struct kvm *kvm)
+{
+       int ret;
+
+       ret = (ioapic_irqchip(kvm) != NULL);
+       return ret;
+}
+
 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                int short_hand, unsigned int dest, int dest_mode);
 
        return kvm->arch.vpic;
 }
 
+static inline int irqchip_split(struct kvm *kvm)
+{
+       return kvm->arch.irqchip_split;
+}
+
 static inline int irqchip_in_kernel(struct kvm *kvm)
 {
        struct kvm_pic *vpic = pic_irqchip(kvm);
+       bool ret;
+
+       ret = (vpic != NULL);
+       ret |= irqchip_split(kvm);
 
        /* Read vpic before kvm->irq_routing.  */
        smp_rmb();
-       return vpic != NULL;
+       return ret;
 }
 
 static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
 
                goto unlock;
        }
        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
-       if (!irqchip_in_kernel(kvm))
+       if (!ioapic_in_kernel(kvm))
                goto unlock;
 
        kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
 }
+
+static const struct kvm_irq_routing_entry empty_routing[] = {};
+
+int kvm_setup_empty_irq_routing(struct kvm *kvm)
+{
+       return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
+}
 
        if (old)
                kfree_rcu(old, rcu);
 
-       kvm_vcpu_request_scan_ioapic(kvm);
+       if (ioapic_in_kernel(kvm))
+               kvm_vcpu_request_scan_ioapic(kvm);
 }
 
 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
                kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
                                apic_find_highest_isr(apic));
        kvm_make_request(KVM_REQ_EVENT, vcpu);
-       kvm_rtc_eoi_tracking_restore_one(vcpu);
+       if (ioapic_in_kernel(vcpu->kvm))
+               kvm_rtc_eoi_tracking_restore_one(vcpu);
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_DISABLE_QUIRKS:
        case KVM_CAP_SET_BOOT_CPU_ID:
+       case KVM_CAP_SPLIT_IRQCHIP:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_PCI_2_3:
                kvm->arch.disabled_quirks = cap->args[0];
                r = 0;
                break;
+       case KVM_CAP_SPLIT_IRQCHIP: {
+               mutex_lock(&kvm->lock);
+               r = -EEXIST;
+               if (irqchip_in_kernel(kvm))
+                       goto split_irqchip_unlock;
+               if (atomic_read(&kvm->online_vcpus))
+                       goto split_irqchip_unlock;
+               r = kvm_setup_empty_irq_routing(kvm);
+               if (r)
+                       goto split_irqchip_unlock;
+               /* Pairs with irqchip_in_kernel. */
+               smp_wmb();
+               kvm->arch.irqchip_split = true;
+               r = 0;
+split_irqchip_unlock:
+               mutex_unlock(&kvm->lock);
+               break;
+       }
        default:
                r = -EINVAL;
                break;
                }
 
                r = -ENXIO;
-               if (!irqchip_in_kernel(kvm))
+               if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
                        goto get_irqchip_out;
                r = kvm_vm_ioctl_get_irqchip(kvm, chip);
                if (r)
                }
 
                r = -ENXIO;
-               if (!irqchip_in_kernel(kvm))
+               if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
                        goto set_irqchip_out;
                r = kvm_vm_ioctl_set_irqchip(kvm, chip);
                if (r)
 
 #endif
 
 int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_setup_empty_irq_routing(struct kvm *kvm);
 int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
 
 #define KVM_CAP_MULTI_ADDRESS_SPACE 118
 #define KVM_CAP_GUEST_DEBUG_HW_BPS 119
 #define KVM_CAP_GUEST_DEBUG_HW_WPS 120
+#define KVM_CAP_SPLIT_IRQCHIP 121
 
 #ifdef KVM_CAP_IRQ_ROUTING