From: Konrad Rzeszutek Wilk
Date: Thu, 26 Apr 2018 02:04:24 +0000 (-0400)
Subject: x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
X-Git-Tag: v4.1.12-124.31.3~770
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=3985ca7fb77e553301a6a0024b3774f1524ba87c;p=users%2Fjedix%2Flinux-maple.git

x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested

AMD does not need the Speculative Store Bypass mitigation to be enabled.

The parameters for this are already available and can be done via MSR
C001_1020. Each family uses a different bit in that MSR for this.

[ tglx: Expose the bit mask via a variable and move the actual MSR
  fiddling into the bugs code as that's the right thing to do and also
  required to prepare for dynamic enable/disable ]

OraBug: 28041771
CVE: CVE-2018-3639

Suggested-by: Borislav Petkov
Signed-off-by: Konrad Rzeszutek Wilk
Signed-off-by: Thomas Gleixner
Reviewed-by: Ingo Molnar
(cherry picked from commit 764f3c21588a059cd783c6ba0734d4db2d72822d)
Signed-off-by: Konrad Rzeszutek Wilk
Reviewed-by: Mihai Carabas

Conflicts:
	arch/x86/include/asm/cpufeatures.h
	arch/x86/kernel/cpu/amd.c
	arch/x86/kernel/cpu/bugs.c
	arch/x86/kernel/cpu/common.c

Signed-off-by: Brian Maly
---
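Below the fold, so git-am ignores it: the per-family SSB-disable bit this
patch programs into MSR C001_1020 (bit 54 on Fam15h, 33 on Fam16h, 10 on
Fam17h, per the bsp_init_amd() hunk) can be observed from user space via
the msr driver. The sketch below is illustrative only and not part of the
patch; it assumes "modprobe msr", root privileges, and that CPU 0 is an
AMD part of one of these families.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_AMD64_LS_CFG 0xc0011020

	int main(void)
	{
		unsigned int bit = 54;	/* Fam15h; use 33 for Fam16h, 10 for Fam17h */
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		/* The msr driver maps the pread() offset to the MSR number. */
		if (fd < 0 || pread(fd, &val, sizeof(val), MSR_AMD64_LS_CFG) != sizeof(val)) {
			perror("rdmsr LS_CFG");
			return 1;
		}
		printf("LS_CFG = %#llx, RDS bit %u is %s\n",
		       (unsigned long long)val, bit,
		       (val >> bit) & 1 ? "set" : "clear");
		return 0;
	}

With the mitigation engaged (e.g. booting with the spec_store_bypass_disable=on
option added earlier in this series), the bit should read back as set.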
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 030f5b99c886..62e963c8f3b6 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -197,6 +197,7 @@
 #define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AMD_RDS	( 7*32+18) /* "" AMD RDS implementation */
 #define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_IBRS	( 7*32+20) /* Control Speculation Control */
 #define X86_FEATURE_STIBP	( 7*32+21) /* Single Thread Indirect Branch Predictors */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 026b94603c01..31947522ca6a 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -199,6 +199,10 @@ enum ssb_mitigation {
 	SPEC_STORE_BYPASS_DISABLE,
 };
 
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_rds_mask;
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5d9632f267a7..0cf61578649e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/nospec-branch.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 
@@ -493,6 +494,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		/* A random value per boot for bit slice [12:upper_bit) */
 		va_align.bits = get_random_int() & va_align.mask;
 	}
+
+	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+		unsigned int bit;
+
+		switch (c->x86) {
+		case 0x15: bit = 54; break;
+		case 0x16: bit = 33; break;
+		case 0x17: bit = 10; break;
+		default: return;
+		}
+		/*
+		 * Try to cache the base value so further operations can
+		 * avoid RMW. If that faults, do not enable RDS.
+		 */
+		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_RDS);
+			setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+			x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+		}
+	}
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
@@ -732,6 +753,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 
 	/* AMD CPUs don't reset SS attributes on SYSRET */
 	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+	if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+		set_cpu_cap(c, X86_FEATURE_RDS);
+		set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+	}
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 2299a4c89dae..0b32cffae91a 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -117,6 +117,13 @@ static u64 x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
 u64 x86_spec_ctrl_priv;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_priv);
 
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ */
+u64 x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_rds_mask;
+
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
@@ -126,7 +133,8 @@ void __init check_bugs(void)
 #endif
 	/*
 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
-	 * have unknown values.
+	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+	 * init code as it is not enumerated and depends on the family.
 	 */
 	if (boot_cpu_has(X86_FEATURE_IBRS)) {
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -252,6 +260,15 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
+static void x86_amd_rds_enable(void)
+{
+	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+
+	if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+		wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
+
+
 /*
  * Disable retpoline and attempt to fall back to another Spectre v2 mitigation.
  * If possible, fall back to IBRS and IBPB.
@@ -667,6 +684,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 
 	switch (cmd) {
 	case SPEC_STORE_BYPASS_CMD_AUTO:
+		/*
+		 * AMD platforms by default don't need SSB mitigation.
+		 */
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+			break;
 	case SPEC_STORE_BYPASS_CMD_ON:
 		mode = SPEC_STORE_BYPASS_DISABLE;
 		break;
@@ -693,6 +715,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 			x86_spec_ctrl_set(SPEC_CTRL_RDS);
 			break;
 		case X86_VENDOR_AMD:
+			x86_amd_rds_enable();
 			break;
 		}
 	}
@@ -713,6 +736,9 @@ void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_IBRS))
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+		x86_amd_rds_enable();
 }
 
 #ifdef CONFIG_SYSFS
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 13e40d37246b..1df878c10dfe 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -836,6 +836,10 @@ static const struct x86_cpu_id cpu_no_spec_store_bypass[] = {
 	{ X86_VENDOR_CENTAUR,	5,					},
 	{ X86_VENDOR_INTEL,	5,					},
 	{ X86_VENDOR_NSC,	5,					},
+	{ X86_VENDOR_AMD,	0x12,					},
+	{ X86_VENDOR_AMD,	0x11,					},
+	{ X86_VENDOR_AMD,	0x10,					},
+	{ X86_VENDOR_AMD,	0xf,					},
 	{ X86_VENDOR_ANY,	4,					},
 	{}
 };
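
For reference, the cpu_no_spec_store_bypass[] additions above mean those
older AMD families (0xf-0x12) are never marked with
X86_BUG_SPEC_STORE_BYPASS at identification time. In the upstream series
the table is consumed by a single match, roughly as sketched here (the
enclosing function in this backport may differ):

	if (!x86_match_cpu(cpu_no_spec_store_bypass))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

Fam15h-17h therefore keep the bug bit and rely on the LS_CFG write in
x86_amd_rds_enable(); after boot,
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass (added by an
earlier patch in this series) should reflect whether the mitigation is
active.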