 bool __read_mostly eager_page_split = true;
 module_param(eager_page_split, bool, 0644);
 
+/* Enable/disable SMT_RSB bug mitigation */
+bool __read_mostly mitigate_smt_rsb;
+module_param(mitigate_smt_rsb, bool, 0444);
+
 /*
  * Restoring the host value for MSRs that are only consumed when running in
  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
                r = KVM_CLOCK_VALID_FLAGS;
                break;
        case KVM_CAP_X86_DISABLE_EXITS:
-               r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
-                     KVM_X86_DISABLE_EXITS_CSTATE;
-               if(kvm_can_mwait_in_guest())
-                       r |= KVM_X86_DISABLE_EXITS_MWAIT;
+               r = KVM_X86_DISABLE_EXITS_PAUSE;
+
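+               /*
+                * Advertise disabling of HLT/MWAIT/C-state exits only when the
+                * cross-thread return predictions mitigation is not in effect;
+                * otherwise only PAUSE exit disabling is reported.
+                */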
+               if (!mitigate_smt_rsb) {
+                       r |= KVM_X86_DISABLE_EXITS_HLT |
+                            KVM_X86_DISABLE_EXITS_CSTATE;
+
+                       if (kvm_can_mwait_in_guest())
+                               r |= KVM_X86_DISABLE_EXITS_MWAIT;
+               }
                break;
        case KVM_CAP_X86_SMM:
                if (!IS_ENABLED(CONFIG_KVM_SMM))
                if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
                        break;
 
-               if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
-                       kvm_can_mwait_in_guest())
-                       kvm->arch.mwait_in_guest = true;
-               if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
-                       kvm->arch.hlt_in_guest = true;
                if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
                        kvm->arch.pause_in_guest = true;
-               if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
-                       kvm->arch.cstate_in_guest = true;
+
+#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
+                   "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
+
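+               /*
+                * With the mitigation active, only the PAUSE bit takes effect
+                * (handled above).  Otherwise honor the full request, but warn
+                * once when non-PAUSE exits are disabled on a CPU affected by
+                * the cross-thread return predictions bug with SMT possible.
+                */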
+               if (!mitigate_smt_rsb) {
+                       if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() &&
+                           (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
+                               pr_warn_once(SMT_RSB_MSG);
+
+                       if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
+                           kvm_can_mwait_in_guest())
+                               kvm->arch.mwait_in_guest = true;
+                       if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
+                               kvm->arch.hlt_in_guest = true;
+                       if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
+                               kvm->arch.cstate_in_guest = true;
+               }
+
                r = 0;
                break;
        case KVM_CAP_MSR_PLATFORM_INFO:
 static int __init kvm_x86_init(void)
 {
        kvm_mmu_x86_module_init();
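+       /*
+        * Force the mitigation off when the CPU is not affected by the
+        * cross-thread return predictions bug or SMT cannot be enabled.
+        */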
+       mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
        return 0;
 }
 module_init(kvm_x86_init);
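
For reference (not part of the patch), a minimal userspace sketch of how a VMM might query and enable KVM_CAP_X86_DISABLE_EXITS after this change; the "request every advertised bit" policy and the minimal error handling are illustrative assumptions. With mitigate_smt_rsb=1 on an affected host, KVM_CHECK_EXTENSION reports only KVM_X86_DISABLE_EXITS_PAUSE, so the request below degrades to PAUSE-only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm_fd = open("/dev/kvm", O_RDWR);
            if (kvm_fd < 0) {
                    perror("/dev/kvm");
                    return 1;
            }

            int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
            if (vm_fd < 0) {
                    perror("KVM_CREATE_VM");
                    return 1;
            }

            /*
             * Exit-disable bits the host is willing to honor.  With
             * mitigate_smt_rsb=1 on an SMT_RSB-affected CPU this is just
             * KVM_X86_DISABLE_EXITS_PAUSE; HLT, MWAIT and C-state are gone.
             */
            int allowed = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                                KVM_CAP_X86_DISABLE_EXITS);

            /* Illustrative policy: request every bit the host advertises. */
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_X86_DISABLE_EXITS,
                    .args[0] = allowed,
            };
            if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0) {
                    perror("KVM_ENABLE_CAP");
                    return 1;
            }

            printf("disabled exits mask: 0x%x\n", allowed);
            return 0;
    }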