Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD). To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.
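
As an illustration only (not part of the patch): a guest that sees this
architectural definition could detect and enable SSBD roughly as in the
sketch below. The cpuid()/wrmsr64() helpers are hypothetical stand-ins for
whatever primitives the guest environment provides; the MSR number and bit
positions are the ones named above.

	#include <stdint.h>

	#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
	#define SPEC_CTRL_SSBD			(1ULL << 2)	/* same bit as in SPEC_CTRL */

	/* Hypothetical stand-ins for the guest's cpuid/wrmsr primitives. */
	extern void cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
			  uint32_t *ecx, uint32_t *edx);
	extern void wrmsr64(uint32_t msr, uint64_t val);

	static void guest_enable_virt_ssbd(void)
	{
		uint32_t eax, ebx, ecx, edx;

		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		if (ebx & (1U << 25))		/* CPUID 0x80000008_EBX[25] */
			wrmsr64(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	}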
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Orabug: 28063992
CVE: CVE-2018-3639
(cherry picked from commit 11fb0683)
Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
Conflicts:
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/process.c
[
cpufeatures.h: different file name and different index
msr-index.h: different file location
bugs.c: different file name
process.c: different file structure
common.c: This is because we skipped the first two patches from the patch
series. We do not have enough feature bits to align all the actual features
in our cpufeature structure. We created a synthetic feature, and this is
where we detect and set it.
]
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_VIRT_SSBD ( 7*32+16) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_ZEN ( 7*32+17) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_AMD_SSBD ( 7*32+18) /* "" AMD RDS implementation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
{
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
- if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+ else if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
set_cpu_cap(c, X86_FEATURE_IBRS);
if (ebx & BIT(15))
set_cpu_cap(c, X86_FEATURE_STIBP);
+ if (ebx & BIT(25))
+ set_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
}
#ifdef CONFIG_X86_32
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
#endif
}
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+ /*
+ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+ * so ssbd_tif_to_spec_ctrl() just works.
+ */
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
static __always_inline void intel_set_ssb_state(unsigned long tifn)
{
u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
- if (static_cpu_has(X86_FEATURE_AMD_SSBD))
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+ amd_set_ssb_virt_state(tifn);
+ else if (static_cpu_has(X86_FEATURE_AMD_SSBD))
amd_set_core_ssb_state(tifn);
else
intel_set_ssb_state(tifn);
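
Note on the comment in amd_set_ssb_virt_state() above: SSBD is bit 2 in
both SPEC_CTRL and VIRT_SPEC_CTRL, so converting the task's TIF_SSBD flag
into the MSR value is a single masked shift. A minimal sketch of such a
helper, assuming the TIF_SSBD/SPEC_CTRL_SSBD_SHIFT definitions from the
upstream tree:

	static __always_inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
	{
		BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
		return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	}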