From: Tom Lendacky
Date: Thu, 17 May 2018 15:09:18 +0000 (+0200)
Subject: x86/speculation: Add virtualized speculative store bypass disable support
X-Git-Tag: v4.1.12-124.31.3~741
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=739493101e18c12242b9015dff41336cfac571ca;p=users%2Fjedix%2Flinux-maple.git

x86/speculation: Add virtualized speculative store bypass disable support

Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD). To allow a simplified view of
this to a guest, an architectural definition has been created through a
new CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With
this, a hypervisor can virtualize the existence of this definition and
provide an architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.

Signed-off-by: Tom Lendacky
Signed-off-by: Thomas Gleixner
Reviewed-by: Borislav Petkov

Orabug: 28063992
CVE: CVE-2018-3639

(cherry picked from commit 11fb0683)
Signed-off-by: Mihai Carabas
Reviewed-by: Konrad Rzeszutek Wilk
Reviewed-by: Darren Kenny
Signed-off-by: Brian Maly

Conflicts:
	arch/x86/include/asm/cpufeatures.h
	arch/x86/include/asm/msr-index.h
	arch/x86/kernel/cpu/bugs.c
	arch/x86/kernel/process.c

[ cpufeatures.h: different file name and different index
  msr-index.h: different file location
  bugs.c: different file name
  process.c: different file structure
  common.c: this is because we skipped the first two patches from the
  patch series. We do not have enough feature bits to align all the
  actual features in our cpufeature structure. We created a synthetic
  feature and this is where we detect and set it. ]

Signed-off-by: Brian Maly
---

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3940d0ec9759..b69aec41b145 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -197,6 +197,7 @@
 #define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_VIRT_SSBD	( 7*32+16) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_ZEN		( 7*32+17) /* "" CPU is AMD family 0x17 (Zen) */
 #define X86_FEATURE_AMD_SSBD	( 7*32+18) /* "" AMD RDS implementation */
 #define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 573b2e1d8613..dfc68ab361a2 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -318,6 +318,8 @@
 #define MSR_AMD64_IBSOPDATA4	0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 
+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL	0xc0010230
 #define MSR_F16H_L2I_PERF_CTR	0xc0010231
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 089eafa0c359..3843b3585394 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -325,7 +325,9 @@ static void x86_amd_ssbd_enable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index fee520ff943f..6b6c09854537 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -763,6 +763,8 @@ void get_cpu_cap(struct cpuinfo_x86 *c, enum get_cpu_cap_behavior behavior)
 			set_cpu_cap(c, X86_FEATURE_IBRS);
 		if (ebx & BIT(15))
 			set_cpu_cap(c, X86_FEATURE_STIBP);
+		if (ebx & BIT(25))
+			set_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
 	}
 #ifdef CONFIG_X86_32
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4f68bf20a67a..7920be85b3df 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -339,6 +339,15 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
 }
 #endif
 
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+	/*
+	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+	 * so ssbd_tif_to_spec_ctrl() just works.
+	 */
+	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
 static __always_inline void intel_set_ssb_state(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
@@ -348,7 +357,9 @@ static __always_inline void intel_set_ssb_state(unsigned long tifn)
 
 static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
 {
-	if (static_cpu_has(X86_FEATURE_AMD_SSBD))
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		amd_set_ssb_virt_state(tifn);
+	else if (static_cpu_has(X86_FEATURE_AMD_SSBD))
 		amd_set_core_ssb_state(tifn);
 	else
 		intel_set_ssb_state(tifn);
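
[ Editor's note, not part of the patch: the guest-visible contract this
  commit implements can be probed from user space on a Linux guest. The
  sketch below is a minimal illustration: CPUID 0x80000008 EBX[25]
  advertises VIRT_SSBD, and writing SPEC_CTRL_SSBD (bit 2, the same bit
  position as in SPEC_CTRL) to MSR 0xc001011f toggles the mitigation.
  It assumes GCC's <cpuid.h> and the standard Linux msr driver
  (modprobe msr, run as root); the file name virt-ssbd.c is arbitrary. ]

/*
 * virt-ssbd.c: probe the virtualized SSBD interface described above.
 * Build: gcc -o virt-ssbd virt-ssbd.c
 * Needs: the "msr" module loaded and root privileges.
 */
#include <cpuid.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f  /* new MSR from this patch */
#define SPEC_CTRL_SSBD           (1ULL << 2) /* same bit as in SPEC_CTRL */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID 0x80000008 EBX[25] advertises virtualized SSBD. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) ||
	    !(ebx & (1u << 25))) {
		puts("VIRT_SSBD not advertised by CPUID");
		return 1;
	}

	/* Write SSBD via /dev/cpu/0/msr, mirroring the patch's wrmsrl(). */
	int fd = open("/dev/cpu/0/msr", O_WRONLY);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	uint64_t val = SPEC_CTRL_SSBD;
	if (pwrite(fd, &val, sizeof(val), MSR_AMD64_VIRT_SPEC_CTRL) !=
	    (ssize_t)sizeof(val))
		perror("wrmsr MSR_AMD64_VIRT_SPEC_CTRL");
	else
		puts("SSBD enabled via MSR_AMD64_VIRT_SPEC_CTRL");
	close(fd);
	return 0;
}

[ Note that with this patch applied the kernel rewrites the MSR on every
  context switch (see __speculative_store_bypass_update() above), so a
  value poked from user space only persists until the next switch; the
  sketch is for probing the interface, not for deploying the
  mitigation. ]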