                kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
 }
 
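+/* Handle an NMI VM-Exit by invoking the host NMI handler; noinstr, IRQs off. */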
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_X86_VMX_COMMON_H */
 
        }
 }
 
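+/*
+ * Handle an NMI that arrived while the guest was running, i.e. a VM-Exit
+ * whose basic exit reason is EXIT_REASON_EXCEPTION_NMI and whose interrupt
+ * info identifies an NMI.  Runs noinstr with IRQs disabled, before host
+ * instrumentation is re-enabled.
+ */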
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu)
+{
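+       /* Nothing to do unless the VM-Exit was caused by an NMI in the guest. */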
+       if ((u16)vmx_get_exit_reason(vcpu).basic != EXIT_REASON_EXCEPTION_NMI ||
+           !is_nmi(vmx_get_intr_info(vcpu)))
+               return;
+
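+       /*
+        * Flag the vCPU as handling an NMI so perf's guest callbacks can
+        * attribute the NMI correctly, then invoke the host NMI handler:
+        * via fred_entry_from_kvm() on FRED-enabled CPUs, via the
+        * vmx_do_nmi_irqoff() assembly stub otherwise.
+        */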
+       kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+       if (cpu_feature_enabled(X86_FEATURE_FRED))
+               fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+       else
+               vmx_do_nmi_irqoff();
+       kvm_after_interrupt(vcpu);
+}
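+/*
+ * Usage sketch (hypothetical caller): another noinstr exit path, e.g. a TDX
+ * counterpart of vmx_vcpu_enter_exit(), could reuse this helper instead of
+ * open coding the NMI forwarding, assuming it also runs with IRQs disabled:
+ *
+ *      static noinstr void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+ *      {
+ *              ...
+ *              vmx_handle_nmi(vcpu);
+ *              guest_state_exit_irqoff();
+ *      }
+ */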
+
 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                                        unsigned int flags)
 {
        if (likely(!vmx_get_exit_reason(vcpu).failed_vmentry))
                vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
-       if ((u16)vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXCEPTION_NMI &&
-           is_nmi(vmx_get_intr_info(vcpu))) {
-               kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
-               if (cpu_feature_enabled(X86_FEATURE_FRED))
-                       fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
-               else
-                       vmx_do_nmi_irqoff();
-               kvm_after_interrupt(vcpu);
-       }
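+       /*
+        * Handle NMIs while still in the noinstr section: an instrumentation-
+        * induced fault would IRET and thereby unblock NMIs before the
+        * guest's NMI has been handled.
+        */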
+       vmx_handle_nmi(vcpu);
 
 out:
        guest_state_exit_irqoff();