#include "vmx.h"
 
 #define VCPU_ID                5
+#define NMI_VECTOR             2
 
 static int ud_count;
 
+static void enable_x2apic(void)
+{
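+       /*
+        * In x2APIC mode the APIC registers are accessed through MSRs: the
+        * index is APIC_BASE_MSR + (legacy MMIO offset >> 4), so SPIV
+        * (offset 0xf0) becomes MSR 0x80f.
+        */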
+       uint32_t spiv_reg = APIC_BASE_MSR + (APIC_SPIV >> 4);
+
+       wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
+             MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
+       wrmsr(spiv_reg, rdmsr(spiv_reg) | APIC_SPIV_APIC_ENABLED);
+}
+
 static void guest_ud_handler(struct ex_regs *regs)
 {
        ud_count++;
        regs->rip += 3; /* VMLAUNCH */
 }
 
+static void guest_nmi_handler(struct ex_regs *regs)
+{
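+       /*
+        * Nothing to do here: the NMI is only used to force an immediate
+        * VM-exit, not to perform any actual work.
+        */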
+}
+
 void l2_guest_code(void)
 {
        GUEST_SYNC(7);
 
        GUEST_SYNC(8);
 
+       /* Forced exit to L1 upon restore */
+       GUEST_SYNC(9);
+
        /* Done, exit to L1 and never come back.  */
        vmcall();
 }
 
-void l1_guest_code(struct vmx_pages *vmx_pages)
+void guest_code(struct vmx_pages *vmx_pages)
 {
 #define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
+       enable_x2apic();
+
+       GUEST_SYNC(1);
+       GUEST_SYNC(2);
+
        enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
 
        GUEST_ASSERT(vmx_pages->vmcs_gpa);
        current_evmcs->revision_id = EVMCS_VERSION;
        GUEST_SYNC(6);
 
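+       /*
+        * Enable NMI-exiting so that a pending NMI forces an immediate
+        * L2->L1 VM-exit instead of being delivered to L2.
+        */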
+       current_evmcs->pin_based_vm_exec_control |=
+               PIN_BASED_NMI_EXITING;
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
-       GUEST_SYNC(9);
+
+       /*
+        * The NMI forces an L2->L1 exit; resuming L2 afterwards, we expect
+        * the eVMCS to be up-to-date (RIP points where it should, not at
+        * the beginning of l2_guest_code()). GUEST_SYNC(9) checks that.
+        */
        GUEST_ASSERT(!vmresume());
-       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-       GUEST_SYNC(10);
-}
 
-void guest_code(struct vmx_pages *vmx_pages)
-{
-       GUEST_SYNC(1);
-       GUEST_SYNC(2);
+       GUEST_SYNC(10);
 
-       if (vmx_pages)
-               l1_guest_code(vmx_pages);
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       GUEST_SYNC(11);
 
        /* Try enlightened vmptrld with an incorrect GPA */
        evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
        GUEST_DONE();
 }
 
+static void inject_nmi(struct kvm_vm *vm)
+{
+       struct kvm_vcpu_events events;
+
+       vcpu_events_get(vm, VCPU_ID, &events);
+
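+       /*
+        * Mark an NMI as pending via KVM_SET_VCPU_EVENTS; KVM will inject
+        * it on the next KVM_RUN.
+        */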
+       events.nmi.pending = 1;
+       events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
+
+       vcpu_events_set(vm, VCPU_ID, &events);
+}
+
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva = 0;
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
        vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+       vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler);
+
+       pr_info("Running L1 which uses EVMCS to run L2\n");
 
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
                            "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
                            (ulong) regs2.rdi, (ulong) regs2.rsi);
+
+               /* Force immediate L2->L1 exit before resuming */
+               if (stage == 8) {
+                       pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
+                       inject_nmi(vm);
+               }
        }
 
 done: