MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
        MSR_EFER, MSR_TSC_AUX, MSR_STAR,
+       MSR_IA32_TSX_CTRL,
 };
 
 #if IS_ENABLED(CONFIG_HYPERV)
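This first hunk simply adds the MSR to vmx_msr_index, but it is what the rest of the patch hangs off: vmx_create_vcpu() (the last vmx.c hunk below) walks this table to populate the per-vCPU guest_msrs[] array of "shared" MSRs that KVM loads with guest values and later restores for the host, and setup_msrs() decides which of those entries are live for the current guest.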
@@ ... @@ static void setup_msrs(struct vcpu_vmx *vmx)
        index = __find_msr_index(vmx, MSR_TSC_AUX);
        if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
                move_msr_up(vmx, index, save_nmsrs++);
+       index = __find_msr_index(vmx, MSR_IA32_TSX_CTRL);
+       if (index >= 0)
+               move_msr_up(vmx, index, save_nmsrs++);
 
        vmx->save_nmsrs = save_nmsrs;
        vmx->guest_msrs_ready = false;
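Unlike MSR_TSC_AUX just above, which is only switched when the guest has RDTSCP, the TSX_CTRL entry is activated whenever __find_msr_index() finds it, i.e. whenever the host has the MSR at all. Guest visibility is enforced elsewhere: the vmx_get_msr()/vmx_set_msr() hunks below reject guest accesses unless the guest's ARCH_CAPABILITIES advertises TSX_CTRL, and the mask set up in vmx_create_vcpu() keeps the guest's CPUID_CLEAR bit away from the hardware.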
@@ ... @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 #endif
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_info);
+       case MSR_IA32_TSX_CTRL:
+               if (!msr_info->host_initiated &&
+                   !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
+                       return 1;
+               goto find_shared_msr;
        case MSR_IA32_UMWAIT_CONTROL:
                if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
                        return 1;
@@ ... @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                        return 1;
-               /* Else, falls through */
+               goto find_shared_msr;
        default:
+       find_shared_msr:
                msr = find_msr_entry(vmx, msr_info->index);
                if (msr) {
                        msr_info->data = msr->data;
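Both new cases jump to the shared-MSR lookup through the new find_shared_msr label, which also replaces the old implicit "/* Else, falls through */" comment with an explicit goto. As a standalone illustration of the read-side gate (a sketch with invented names, not KVM's code):

#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_TSX_CTRL_MSR	(1ULL << 7)

/* Return 0 and fill *data on success, 1 to make the caller inject #GP. */
static int toy_rdmsr_tsx_ctrl(uint64_t guest_arch_caps, int host_initiated,
			      uint64_t stored, uint64_t *data)
{
	/*
	 * The guest may only see TSX_CTRL if its ARCH_CAPABILITIES
	 * advertises it; accesses from host userspace (host_initiated,
	 * e.g. for migration) bypass the check.
	 */
	if (!host_initiated && !(guest_arch_caps & ARCH_CAP_TSX_CTRL_MSR))
		return 1;
	*data = stored;
	return 0;
}

int main(void)
{
	uint64_t data;

	printf("%d\n", toy_rdmsr_tsx_ctrl(0, 0, 0, &data));	/* 1: #GP */
	printf("%d\n", toy_rdmsr_tsx_ctrl(0, 1, 0, &data));	/* 0: allowed */
	return 0;
}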
@@ ... @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                              MSR_IA32_SPEC_CTRL,
                                              MSR_TYPE_RW);
                break;
+       case MSR_IA32_TSX_CTRL:
+               if (!msr_info->host_initiated &&
+                   !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
+                       return 1;
+               if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
+                       return 1;
+               goto find_shared_msr;
        case MSR_IA32_PRED_CMD:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
@@ ... @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Check reserved bit, higher 32 bits should be zero */
                if ((data >> 32) != 0)
                        return 1;
-               /* Else, falls through */
+               goto find_shared_msr;
+
        default:
+       find_shared_msr:
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
                        u64 old_msr_data = msr->data;
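The write side repeats the ARCH_CAPABILITIES gate and additionally rejects reserved bits: only TSX_CTRL_RTM_DISABLE (bit 0) and TSX_CTRL_CPUID_CLEAR (bit 1) may be set. A minimal sketch of just that validation (invented helper name):

#include <stdint.h>
#include <assert.h>

#define TSX_CTRL_RTM_DISABLE	(1ULL << 0)
#define TSX_CTRL_CPUID_CLEAR	(1ULL << 1)

/* Return 1 (inject #GP) if any bit other than the two defined ones is set. */
static int toy_tsx_ctrl_check(uint64_t data)
{
	return (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) ? 1 : 0;
}

int main(void)
{
	assert(toy_tsx_ctrl_check(TSX_CTRL_RTM_DISABLE) == 0);
	assert(toy_tsx_ctrl_check(TSX_CTRL_RTM_DISABLE |
				  TSX_CTRL_CPUID_CLEAR) == 0);
	assert(toy_tsx_ctrl_check(1ULL << 2) == 1);	/* reserved bit */
	return 0;
}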
@@ ... @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                        continue;
                vmx->guest_msrs[j].index = i;
                vmx->guest_msrs[j].data = 0;
-               vmx->guest_msrs[j].mask = -1ull;
+
+               switch (index) {
+               case MSR_IA32_TSX_CTRL:
+                       /*
+                        * No need to pass TSX_CTRL_CPUID_CLEAR through, so
+                        * let's avoid changing CPUID bits under the host
+                        * kernel's feet.
+                        */
+                       vmx->guest_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+                       break;
+               default:
+                       vmx->guest_msrs[j].mask = -1ull;
+                       break;
+               }
                ++vmx->nmsrs;
        }
 
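Here the guest copy of TSX_CTRL gets a mask that excludes TSX_CTRL_CPUID_CLEAR, so the guest's CPUID_CLEAR bit is never written to the real MSR (doing so would flip the host's RTM/HLE CPUID bits under the kernel's feet, as the comment says). This relies on the companion change that makes kvm_set_shared_msr() honor the mask when loading guest values; roughly, assuming that semantic (standalone sketch, invented names):

#include <stdint.h>
#include <assert.h>

#define TSX_CTRL_RTM_DISABLE	(1ULL << 0)
#define TSX_CTRL_CPUID_CLEAR	(1ULL << 1)

/* Only bits set in @mask come from the guest; the rest keep the host value. */
static uint64_t masked_msr_value(uint64_t guest, uint64_t host, uint64_t mask)
{
	return (guest & mask) | (host & ~mask);
}

int main(void)
{
	uint64_t mask = ~(uint64_t)TSX_CTRL_CPUID_CLEAR;

	/*
	 * Guest sets both bits while the host runs with CPUID_CLEAR=1:
	 * RTM_DISABLE follows the guest, CPUID_CLEAR stays the host's.
	 */
	assert(masked_msr_value(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR,
				TSX_CTRL_CPUID_CLEAR, mask) ==
	       (TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR));

	/* Guest clears CPUID_CLEAR; the write is silently masked out. */
	assert(masked_msr_value(0, TSX_CTRL_CPUID_CLEAR, mask) ==
	       TSX_CTRL_CPUID_CLEAR);
	return 0;
}

The final hunk is in common x86 code, kvm_get_arch_capabilities() in arch/x86/kvm/x86.c: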
@@ ... @@ u64 kvm_get_arch_capabilities(void)
                data |= ARCH_CAP_MDS_NO;
 
        /*
-        * On TAA affected systems, export MDS_NO=0 when:
-        *      - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1.
-        *      - Updated microcode is present. This is detected by
-        *        the presence of ARCH_CAP_TSX_CTRL_MSR and ensures
-        *        that VERW clears CPU buffers.
-        *
-        * When MDS_NO=0 is exported, guests deploy clear CPU buffer
-        * mitigation and don't complain:
-        *
-        *      "Vulnerable: Clear CPU buffers attempted, no microcode"
-        *
-        * If TSX is disabled on the system, guests are also mitigated against
-        * TAA and clear CPU buffer mitigation is not required for guests.
+        * On TAA affected systems:
+        *      - nothing to do if TSX is disabled on the host.
+        *      - we emulate TSX_CTRL if present on the host.
+        *        This lets the guest use VERW to clear CPU buffers.
         */
        if (!boot_cpu_has(X86_FEATURE_RTM))
-               data &= ~ARCH_CAP_TAA_NO;
+               data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
        else if (!boot_cpu_has_bug(X86_BUG_TAA))
                data |= ARCH_CAP_TAA_NO;
-       else if (data & ARCH_CAP_TSX_CTRL_MSR)
-               data &= ~ARCH_CAP_MDS_NO;
 
-       /* KVM does not emulate MSR_IA32_TSX_CTRL.  */
-       data &= ~ARCH_CAP_TSX_CTRL_MSR;
        return data;
 }
 EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
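With TSX_CTRL actually emulated, kvm_get_arch_capabilities() no longer strips ARCH_CAP_TSX_CTRL_MSR unconditionally: the bit is hidden (together with TAA_NO) only when the host runs with TSX disabled, and the old trick of clearing MDS_NO to coax guests into using VERW is dropped. A runnable summary of the three host cases (bit positions per the Intel layout; the helper name is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_MDS_NO		(1ULL << 5)
#define ARCH_CAP_TSX_CTRL_MSR	(1ULL << 7)
#define ARCH_CAP_TAA_NO		(1ULL << 8)

/*
 * Mirror of the TSX-related fix-up above: hide TSX_CTRL (and any stale
 * TAA_NO) when the host has no RTM, advertise TAA_NO when the host is
 * not affected by TAA, otherwise pass the host's TSX_CTRL bit through.
 */
static uint64_t tsx_fixup(uint64_t data, bool host_rtm, bool host_taa_bug)
{
	if (!host_rtm)
		data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
	else if (!host_taa_bug)
		data |= ARCH_CAP_TAA_NO;
	return data;
}

int main(void)
{
	uint64_t host = ARCH_CAP_TSX_CTRL_MSR; /* TAA-affected, new microcode */

	printf("TSX off:      %#llx\n",
	       (unsigned long long)tsx_fixup(host, false, true));	/* 0 */
	printf("TSX on, TAA:  %#llx\n",
	       (unsigned long long)tsx_fixup(host, true, true));	/* 0x80 */
	printf("host not TAA: %#llx\n",
	       (unsigned long long)tsx_fixup(host, true, false));	/* 0x180 */
	return 0;
}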