4.29 KVM_GET_VCPU_EVENTS
 
 Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
 Architectures: x86
 Type: vcpu ioctl
 Parameters: struct kvm_vcpu_events (out)
                __u8 injected;
                __u8 nr;
                __u8 soft;
-               __u8 pad;
+               __u8 shadow;
        } interrupt;
        struct {
                __u8 injected;
        __u32 flags;
 };
 
+KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+interrupt.shadow contains a valid state. Otherwise, this field is undefined.
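+
+For illustration, a minimal userspace sketch reading the shadow state
+(vcpu_fd is an already created VCPU file descriptor and dump_intr_shadow
+is just an example name; <linux/kvm.h>, <sys/ioctl.h> and <stdio.h> are
+assumed to be included):
+
+	void dump_intr_shadow(int vcpu_fd)
+	{
+		struct kvm_vcpu_events events;
+
+		if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
+			return;
+		/* interrupt.shadow is only meaningful with the flag set */
+		if (events.flags & KVM_VCPUEVENT_VALID_SHADOW)
+			printf("interrupt shadow: %#x\n",
+			       events.interrupt.shadow);
+	}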
+
 4.30 KVM_SET_VCPU_EVENTS
 
 Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
 Architectures: x86
 Type: vcpu ioctl
 Parameters: struct kvm_vcpu_events (in)
 KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
 KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
 
+If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
+the flags field to signal that interrupt.shadow contains a valid state and
+shall be written into the VCPU.
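+
+A minimal sketch of the corresponding round trip that clears any pending
+shadow (clear_intr_shadow is an example name; availability of
+KVM_CAP_INTR_SHADOW should first be verified with KVM_CHECK_EXTENSION):
+
+	void clear_intr_shadow(int vcpu_fd)
+	{
+		struct kvm_vcpu_events events;
+
+		/* read everything back so the always-written fields survive */
+		if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
+			return;
+		events.interrupt.shadow = 0;
+		/* transfer only the shadow; other optional fields stay put */
+		events.flags = KVM_VCPUEVENT_VALID_SHADOW;
+		ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
+	}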
+
 
 5. The kvm_run structure
 
 
 /* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
 #define KVM_VCPUEVENT_VALID_NMI_PENDING        0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR        0x00000002
+#define KVM_VCPUEVENT_VALID_SHADOW     0x00000004
+
+/* Interrupt shadow states */
+#define KVM_X86_SHADOW_INT_MOV_SS      0x01
+#define KVM_X86_SHADOW_INT_STI         0x02
 
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
                __u8 injected;
                __u8 nr;
                __u8 soft;
-               __u8 pad;
+               __u8 shadow;
        } interrupt;
        struct {
                __u8 injected;
 
        struct fetch_cache fetch;
 };
 
-#define X86_SHADOW_INT_MOV_SS  1
-#define X86_SHADOW_INT_STI     2
-
 struct x86_emulate_ctxt {
        /* Register state before/after emulation. */
        struct kvm_vcpu *vcpu;
 
                }
 
                if (c->modrm_reg == VCPU_SREG_SS)
-                       toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
+                       toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);
 
                rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
 
                if (emulator_bad_iopl(ctxt))
                        kvm_inject_gp(ctxt->vcpu, 0);
                else {
-                       toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+                       toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
                        ctxt->eflags |= X86_EFLAGS_IF;
                        c->dst.type = OP_NONE;  /* Disable writeback. */
                }
 
        u32 ret = 0;
 
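+       /* SVM keeps a single shadow bit, so report it as both flavors */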
        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-               ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
+               ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret & mask;
 }
 
 
        int ret = 0;
 
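+       /* VMX tracks STI and MOV SS blocking as separate interruptibility bits */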
        if (interruptibility & GUEST_INTR_STATE_STI)
-               ret |= X86_SHADOW_INT_STI;
+               ret |= KVM_X86_SHADOW_INT_STI;
        if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-               ret |= X86_SHADOW_INT_MOV_SS;
+               ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
        return ret & mask;
 }
 
        interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
 
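+       /*
+        * Setting both shadow bits at once would fail the VM-entry checks,
+        * so let the MOV SS shadow take precedence over STI.
+        */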
-       if (mask & X86_SHADOW_INT_MOV_SS)
+       if (mask & KVM_X86_SHADOW_INT_MOV_SS)
                interruptibility |= GUEST_INTR_STATE_MOV_SS;
-       if (mask & X86_SHADOW_INT_STI)
+       else if (mask & KVM_X86_SHADOW_INT_STI)
                interruptibility |= GUEST_INTR_STATE_STI;
 
        if ((interruptibility != interruptibility_old))
 
                vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
        events->interrupt.nr = vcpu->arch.interrupt.nr;
        events->interrupt.soft = 0;
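+       /* ask the vendor backend for both shadow flavors */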
+       events->interrupt.shadow =
+               kvm_x86_ops->get_interrupt_shadow(vcpu,
+                       KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
 
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
        events->sipi_vector = vcpu->arch.sipi_vector;
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                        | KVM_VCPUEVENT_VALID_SHADOW);
 
        vcpu_put(vcpu);
 }
                                              struct kvm_vcpu_events *events)
 {
        if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
-                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                             | KVM_VCPUEVENT_VALID_SHADOW))
                return -EINVAL;
 
        vcpu_load(vcpu);
        vcpu->arch.interrupt.soft = events->interrupt.soft;
        if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
                kvm_pic_clear_isr_ack(vcpu->kvm);
+       if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+               kvm_x86_ops->set_interrupt_shadow(vcpu,
+                                                 events->interrupt.shadow);
 
        vcpu->arch.nmi_injected = events->nmi.injected;
        if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
 
 #define KVM_CAP_HYPERV_SPIN 46
 #define KVM_CAP_PCI_SEGMENT 47
 #define KVM_CAP_PPC_PAIRED_SINGLES 48
+#define KVM_CAP_INTR_SHADOW 49
 #define KVM_CAP_X86_ROBUST_SINGLESTEP 51
 
 #ifdef KVM_CAP_IRQ_ROUTING