--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Oracle and/or its affiliates.
+ *
+ * Based on:
+ *   svm_int_ctl_test
+ *
+ *   Copyright (C) 2021, Red Hat, Inc.
+ *
+ */
+
+#include <stdatomic.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "apic.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "test_util.h"
+#include "../lib/kvm_util_internal.h"
+
+#define VCPU_ID                0
+#define INT_NR                 0x20
+#define X86_FEATURE_NRIPS      BIT(3)
+
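+/*
+ * nmi_stage is updated from both L1 guest code and the guest's NMI handler,
+ * so the atomic_int operations on it must be lock-free to be usable there.
+ */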
+static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");
+
+static unsigned int bp_fired;
+static void guest_bp_handler(struct ex_regs *regs)
+{
+       bp_fired++;
+}
+
+static unsigned int int_fired;
+static void l2_guest_code_int(void);
+
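+/*
+ * l2_guest_code_int() is forward-declared so that the handler can check the
+ * injected soft interrupt's return RIP against the very first L2 instruction
+ * (L1 points next_rip at the pre-injection save.rip below).
+ */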
+static void guest_int_handler(struct ex_regs *regs)
+{
+       int_fired++;
+       GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
+                      regs->rip, (unsigned long)l2_guest_code_int);
+}
+
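+/*
+ * Expected soft-INT flow: the injected INT 0x20 handler has already run once
+ * when this code starts, VMMCALL then exits to L1, which re-enters L2 while
+ * injecting a #BP with next_rip pointing past the UD2.  The UD2 is therefore
+ * never executed and the test finishes with an intercepted HLT.
+ */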
+static void l2_guest_code_int(void)
+{
+       GUEST_ASSERT_1(int_fired == 1, int_fired);
+       vmmcall();
+       ud2();
+
+       GUEST_ASSERT_1(bp_fired == 1, bp_fired);
+       hlt();
+}
+
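+/*
+ * NMI test stages: 1 - the NMI injected via event_inj has run,
+ * 2 - set by L1 while GIF is clear and the self-NMI is still pending,
+ * 3 - the self-NMI delivered after STGI, which completes the test.
+ */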
+static atomic_int nmi_stage;
+#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
+#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
+static void guest_nmi_handler(struct ex_regs *regs)
+{
+       nmi_stage_inc();
+
+       if (nmi_stage_get() == 1) {
+               vmmcall();
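+               /* L1 never re-enters L2 after the VMMCALL exit at stage 1 */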
+               GUEST_ASSERT(false);
+       } else {
+               GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
+               GUEST_DONE();
+       }
+}
+
+static void l2_guest_code_nmi(void)
+{
+       ud2();
+}
+
+static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       struct vmcb *vmcb = svm->vmcb;
+
+       if (is_nmi)
+               x2apic_enable();
+
+       /* Prepare for L2 execution. */
+       generic_svm_setup(svm,
+                         is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
+                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
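+       /*
+        * HLT is intercepted as the test's final L2 exit; the #UD and #PF
+        * intercepts seemingly serve as safety nets in case the UD2 is ever
+        * reached or the alternate-IDT switch goes wrong.
+        */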
+       vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
+       vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);
+
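+       /*
+        * Inject the initial test event directly via event_inj: a hardware
+        * NMI for the NMI flavor, a soft interrupt (vector INT_NR) otherwise.
+        */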
+       if (is_nmi) {
+               vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+       } else {
+               vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
+               /*
+                * next_rip holds the return address that event delivery pushes
+                * on the stack; pointing it at the pre-injection save.rip makes
+                * the handler observe the first L2 instruction as its return
+                * RIP.
+                */
+               vmcb->control.next_rip = vmcb->save.rip;
+       }
+
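+       /*
+        * Both flavors are expected to exit with VMMCALL: from the NMI handler
+        * at stage 1, or from l2_guest_code_int() after its INT handler ran.
+        */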
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
+                      vmcb->control.exit_code,
+                      vmcb->control.exit_info_1, vmcb->control.exit_info_2);
+
+       if (is_nmi) {
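+               /*
+                * With GIF cleared the self-IPI NMI below stays pending, so
+                * nmi_stage can be checked and advanced without racing it.
+                */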
+               clgi();
+               x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);
+
+               GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
+               nmi_stage_inc();
+
+               stgi();
+               /*
+                * The self-NMI pended while GIF was clear fires here; its
+                * handler observes stage 3 and completes the test with
+                * GUEST_DONE(), so this loop is never left.
+                */
+               while (true)
+                       cpu_relax();
+       }
+
+       /* Skip over the 3-byte VMMCALL */
+       vmcb->save.rip += 3;
+
+       /*
+        * Switch to the alternate IDT, which the guest has never touched, so
+        * that delivering the injected #BP takes an intervening #NPF that KVM
+        * must handle without losing the pending soft exception.
+        */
+       vmcb->save.idtr.base = idt_alt;
+       vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */
+
+       vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
+       /* The return address pushed on the stack: skip over the 2-byte UD2 */
+       vmcb->control.next_rip = vmcb->save.rip + 2;
+
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
+                      vmcb->control.exit_code,
+                      vmcb->control.exit_info_1, vmcb->control.exit_info_2);
+
+       GUEST_DONE();
+}
+
+static void run_test(bool is_nmi)
+{
+       struct kvm_vm *vm;
+       vm_vaddr_t svm_gva;
+       vm_vaddr_t idt_alt_vm;
+       struct kvm_guest_debug debug;
+
+       pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");
+
+       vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+
+       vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
+       vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
+       vm_install_exception_handler(vm, INT_NR, guest_int_handler);
+
+       vcpu_alloc_svm(vm, &svm_gva);
+
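+       /*
+        * The soft-INT flavor needs a byte-for-byte copy of the IDT in a fresh
+        * guest page.  The copy is written only from the host side, so the
+        * guest's first access to it (during #BP delivery) should take an
+        * #NPF.
+        */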
+       if (!is_nmi) {
+               void *idt, *idt_alt;
+
+               idt_alt_vm = vm_vaddr_alloc_page(vm);
+               idt_alt = addr_gva2hva(vm, idt_alt_vm);
+               idt = addr_gva2hva(vm, vm->idt);
+               memcpy(idt_alt, idt, getpagesize());
+       } else {
+               idt_alt_vm = 0;
+       }
+       vcpu_args_set(vm, VCPU_ID, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
+
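+       /*
+        * Reset the guest debug state to all zeroes; presumably this ensures
+        * no debug traps are armed and the injected #BP is delivered to the
+        * guest rather than causing an exit to userspace.
+        */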
+       memset(&debug, 0, sizeof(debug));
+       vcpu_set_guest_debug(vm, VCPU_ID, &debug);
+
+       struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+       struct ucall uc;
+
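+       /* Let SIGALRM kill the test if the vCPU wedges for over 2 seconds. */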
+       alarm(2);
+       vcpu_run(vm, VCPU_ID);
+       alarm(0);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                   "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+                   run->exit_reason,
+                   exit_reason_str(run->exit_reason));
+
+       switch (get_ucall(vm, VCPU_ID, &uc)) {
+       case UCALL_ABORT:
+               TEST_FAIL("%s at %s:%ld, vals = 0x%lx 0x%lx 0x%lx", (const char *)uc.args[0],
+                         __FILE__, uc.args[1], uc.args[2], uc.args[3], uc.args[4]);
+               break;
+               /* NOT REACHED */
+       case UCALL_DONE:
+               goto done;
+       default:
+               TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+       }
+done:
+       kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_cpuid_entry2 *cpuid;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       nested_svm_check_supported();
+
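+       /*
+        * nRIP Save (CPUID 0x8000000A EDX bit 3) is required since the test
+        * programs vmcb->control.next_rip directly for soft event injection.
+        */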
+       cpuid = kvm_get_supported_cpuid_entry(0x8000000a);
+       TEST_ASSERT(cpuid->edx & X86_FEATURE_NRIPS,
+                   "KVM with nSVM is supposed to unconditionally advertise nRIP Save");
+
+       atomic_init(&nmi_stage, 0);
+
+       run_test(false);
+       run_test(true);
+
+       return 0;
+}