static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
-       if (is_guest_mode(vcpu)) {
-               if (to_vmx(vcpu)->nested.nested_run_pending)
-                       return 0;
-               if (nested_exit_on_nmi(vcpu)) {
-                       nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
-                                         NMI_VECTOR | INTR_TYPE_NMI_INTR |
-                                         INTR_INFO_VALID_MASK, 0);
-                       /*
-                        * The NMI-triggered VM exit counts as injection:
-                        * clear this one and block further NMIs.
-                        */
-                       vcpu->arch.nmi_pending = 0;
-                       vmx_set_nmi_mask(vcpu, true);
-                       return 0;
-               }
-       }
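+       /* No event can be injected while a nested VM entry is still pending. */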
+       if (to_vmx(vcpu)->nested.nested_run_pending)
+               return 0;
 
        if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
                return 0;
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-       if (is_guest_mode(vcpu)) {
-               if (to_vmx(vcpu)->nested.nested_run_pending)
-                       return 0;
-               if (nested_exit_on_intr(vcpu)) {
-                       nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
-                                         0, 0);
-                       /*
-                        * fall through to normal code, but now in L1, not L2
-                        */
-               }
-       }
-
-       return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+       return (!to_vmx(vcpu)->nested.nested_run_pending &&
+               vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                        (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
        }
 }
 
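+/*
+ * Check whether a pending NMI or external interrupt should first take an
+ * L2->L1 VM exit.  Returns -EBUSY when that exit has to be deferred because
+ * a nested VM entry is still pending; the caller is then expected to request
+ * an immediate exit instead (see inject_pending_event below).
+ */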
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+               if (vmx->nested.nested_run_pending)
+                       return -EBUSY;
+               nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+                                 NMI_VECTOR | INTR_TYPE_NMI_INTR |
+                                 INTR_INFO_VALID_MASK, 0);
+               /*
+                * The NMI-triggered VM exit counts as injection:
+                * clear this one and block further NMIs.
+                */
+               vcpu->arch.nmi_pending = 0;
+               vmx_set_nmi_mask(vcpu, true);
+               return 0;
+       }
+
+       if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+           nested_exit_on_intr(vcpu)) {
+               if (vmx->nested.nested_run_pending)
+                       return -EBUSY;
+               nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+       }
+
+       return 0;
+}
+
 /*
  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
                nested_vmx_succeed(vcpu);
        if (enable_shadow_vmcs)
                vmx->nested.sync_shadow_vmcs = true;
+
+       /* in case we halted in L2 */
+       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 /*
        .check_intercept = vmx_check_intercept,
        .handle_external_intr = vmx_handle_external_intr,
        .mpx_supported = vmx_mpx_supported,
+
+       .check_nested_events = vmx_check_nested_events,
 };
 
 static int __init vmx_init(void)
 
        kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 {
+       int r;
+
        /* try to reinject previous events if any */
        if (vcpu->arch.exception.pending) {
                trace_kvm_inj_exception(vcpu->arch.exception.nr,
                                          vcpu->arch.exception.has_error_code,
                                          vcpu->arch.exception.error_code,
                                          vcpu->arch.exception.reinject);
-               return;
+               return 0;
        }
 
        if (vcpu->arch.nmi_injected) {
                kvm_x86_ops->set_nmi(vcpu);
-               return;
+               return 0;
        }
 
        if (vcpu->arch.interrupt.pending) {
                kvm_x86_ops->set_irq(vcpu);
-               return;
+               return 0;
+       }
+
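+       /*
+        * Re-injected events above take priority.  Only once they are out of
+        * the way do we check whether a new NMI or interrupt should instead
+        * cause a nested VM exit to L1.
+        */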
+       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+               r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+               if (r != 0)
+                       return r;
        }
 
        /* try to inject new event if pending */
                        kvm_x86_ops->set_irq(vcpu);
                }
        }
+       return 0;
 }
 
 static void process_nmi(struct kvm_vcpu *vcpu)
                        goto out;
                }
 
-               inject_pending_event(vcpu);
-
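+               /*
+                * A nonzero result means the event has to wait for a pending
+                * nested VM entry; request an immediate exit so that it is
+                * reconsidered as soon as the entry has completed.
+                */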
+               if (inject_pending_event(vcpu, req_int_win) != 0)
+                       req_immediate_exit = true;
                /* enable NMI/IRQ window open exits if needed */
-               if (vcpu->arch.nmi_pending)
+               else if (vcpu->arch.nmi_pending)
                        req_immediate_exit =
                                kvm_x86_ops->enable_nmi_window(vcpu) != 0;
                else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
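+       /*
+        * Let a vCPU that halted in L2 become runnable again: if a pending
+        * event is intercepted by L1, check_nested_events performs the
+        * nested VM exit, and that exit path sets mp_state to RUNNABLE.
+        */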
+       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
+               kvm_x86_ops->check_nested_events(vcpu, false);
+
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                !vcpu->arch.apf.halted)
                || !list_empty_careful(&vcpu->async_pf.done)