if (!ghcb_sw_scratch_is_valid(ghcb))
                        goto vmgexit_err;
                break;
+       case SVM_VMGEXIT_NMI_COMPLETE:
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
        default:
                                            control->exit_info_2,
                                            svm->ghcb_sa);
                break;
+       case SVM_VMGEXIT_NMI_COMPLETE:
+               ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
+               break;
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                vcpu_unimpl(&svm->vcpu,
                            "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
 
 static int iret_interception(struct vcpu_svm *svm)
 {
+       /*
+        * The guest executed an IRET while an NMI was masked: count the
+        * NMI-window exit and record (via HF_IRET_MASK) that NMI
+        * injection may become possible once the IRET has retired.
+        */
        ++svm->vcpu.stat.nmi_window_exits;
-       svm_clr_intercept(svm, INTERCEPT_IRET);
        svm->vcpu.arch.hflags |= HF_IRET_MASK;
-       svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
+       /*
+        * For SEV-ES guests the IRET intercept is never armed and RIP is
+        * not sampled — presumably because the guest register state is
+        * not accessible to the hypervisor for SEV-ES (confirm against
+        * the matching sev_es_guest() checks in the NMI-mask paths).
+        */
+       if (!sev_es_guest(svm->vcpu.kvm)) {
+               svm_clr_intercept(svm, INTERCEPT_IRET);
+               svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
+       }
+       /* Re-evaluate pending events now that NMI unmasking is possible. */
        kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        return 1;
 }
 
        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
        vcpu->arch.hflags |= HF_NMI_MASK;
-       svm_set_intercept(svm, INTERCEPT_IRET);
+       if (!sev_es_guest(svm->vcpu.kvm))
+               svm_set_intercept(svm, INTERCEPT_IRET);
        ++vcpu->stat.nmi_injections;
 }
 
 
        if (masked) {
                svm->vcpu.arch.hflags |= HF_NMI_MASK;
-               svm_set_intercept(svm, INTERCEPT_IRET);
+               if (!sev_es_guest(svm->vcpu.kvm))
+                       svm_set_intercept(svm, INTERCEPT_IRET);
        } else {
                svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-               svm_clr_intercept(svm, INTERCEPT_IRET);
+               if (!sev_es_guest(svm->vcpu.kvm))
+                       svm_clr_intercept(svm, INTERCEPT_IRET);
        }
 }
 
         * If we've made progress since setting HF_IRET_MASK, we've
         * executed an IRET and can allow NMI injection.
         */
-       if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
-           && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
+       if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
+           (sev_es_guest(svm->vcpu.kvm) ||
+            kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
                svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        }