return vmx_handle_exit(vcpu, fastpath);
 }
 
+#ifdef CONFIG_KVM_SMM
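+/*
+ * SMM is not supported for TDX guests: the TDX module provides no way for
+ * the VMM to inject an SMI into a guest TD or to switch a guest vCPU into
+ * SMM.  Reaching any of these callbacks with a TD vCPU is therefore a KVM
+ * bug; KVM_BUG_ON() marks the VM as bugged, so the return value only needs
+ * to be benign (deny the SMI, report success for the state transitions).
+ */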
+static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return 0;
+
+       return vmx_smi_allowed(vcpu, for_injection);
+}
+
+static int vt_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return 0;
+
+       return vmx_enter_smm(vcpu, smram);
+}
+
+static int vt_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return 0;
+
+       return vmx_leave_smm(vcpu, smram);
+}
+
+static void vt_enable_smi_window(struct kvm_vcpu *vcpu)
+{
+       if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
+               return;
+
+       /* vmx_enable_smi_window() is a nop; RSM will cause a vmexit anyway. */
+       vmx_enable_smi_window(vcpu);
+}
+#endif
+
 static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
 {
        struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
        .setup_mce = vmx_setup_mce,
 
 #ifdef CONFIG_KVM_SMM
-       .smi_allowed = vmx_smi_allowed,
-       .enter_smm = vmx_enter_smm,
-       .leave_smm = vmx_leave_smm,
-       .enable_smi_window = vmx_enable_smi_window,
+       .smi_allowed = vt_smi_allowed,
+       .enter_smm = vt_enter_smm,
+       .leave_smm = vt_leave_smm,
+       .enable_smi_window = vt_enable_smi_window,
 #endif
 
        .check_emulate_instruction = vmx_check_emulate_instruction,