        void (*setup_mce)(struct kvm_vcpu *vcpu);
 
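+       /* Returns nonzero when an SMI can be injected into the guest. */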
+       int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
        int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
 };
 
        vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+{
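+       /* SMIs are always deliverable for now; no SVM-specific checks. */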
+       return 1;
+}
+
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        /* TODO: Implement */
        .update_pi_irte = svm_update_pi_irte,
        .setup_mce = svm_setup_mce,
 
+       .smi_allowed = svm_smi_allowed,
        .pre_enter_smm = svm_pre_enter_smm,
        .pre_leave_smm = svm_pre_leave_smm,
 };
 
                        ~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
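+       /* SMIs are always deliverable for now; no VMX-specific checks. */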
+       return 1;
+}
+
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        /* TODO: Implement */
 
        .setup_mce = vmx_setup_mce,
 
+       .smi_allowed = vmx_smi_allowed,
        .pre_enter_smm = vmx_pre_enter_smm,
        .pre_leave_smm = vmx_pre_leave_smm,
 };
 
                }
 
                 kvm_x86_ops->queue_exception(vcpu);
-       } else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+       } else if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
+                  kvm_x86_ops->smi_allowed(vcpu)) {
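+               /* If smi_allowed() had refused, the SMI would stay pending. */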
                vcpu->arch.smi_pending = false;
                enter_smm(vcpu);
        } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {