AMD does not need the Speculative Store Bypass mitigation to be enabled
by default. The control for this is already available and can be set via
MSR C001_1020. Each family uses a different bit in that MSR for this.
[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
into the bugs code as that's the right thing to do and also required
to prepare for dynamic enable/disable ]
OraBug: 28041771
CVE: CVE-2018-3639
Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 764f3c21588a059cd783c6ba0734d4db2d72822d)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Conflicts:
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
Signed-off-by: Brian Maly <brian.maly@oracle.com>
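
For reference, a minimal user-space sketch (not part of the patch; the helper
names and the stubbed wrmsr are invented for illustration) of the mechanism
described above: pick the family-specific bit in MSR C001_1020, cache the
base value once at boot, and compose the enable/disable values from base and
mask so later toggles avoid a read-modify-write.

#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_LS_CFG	0xc0011020	/* MSR C001_1020 */

/* SSB-disable bit per AMD family, matching the switch in bsp_init_amd() below. */
static uint64_t amd_ls_cfg_ssbd_mask(unsigned int family)
{
	switch (family) {
	case 0x15: return 1ULL << 54;
	case 0x16: return 1ULL << 33;
	case 0x17: return 1ULL << 10;
	default:   return 0;	/* unknown family: no bit known */
	}
}

/* Stand-in for the kernel's wrmsrl(); just prints what would be written. */
static void wrmsr_stub(uint32_t msr, uint64_t val)
{
	printf("wrmsr 0x%x <- 0x%016llx\n", (unsigned int)msr,
	       (unsigned long long)val);
}

int main(void)
{
	unsigned int family = 0x17;	/* example: Fam17h */
	uint64_t base = 0;		/* would be read once at boot via rdmsr */
	uint64_t mask = amd_ls_cfg_ssbd_mask(family);

	if (!mask)
		return 1;

	/* Disable speculative store bypass: a plain write of base | mask. */
	wrmsr_stub(MSR_AMD64_LS_CFG, base | mask);
	/* Re-enable it: restoring the cached base needs no read-modify-write. */
	wrmsr_stub(MSR_AMD64_LS_CFG, base);
	return 0;
}
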
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AMD_RDS ( 7*32+18) /* "" AMD RDS implementation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define X86_FEATURE_IBRS ( 7*32+20) /* Control Speculation Control */
#define X86_FEATURE_STIBP ( 7*32+21) /* Single Thread Indirect Branch Predictors */
SPEC_STORE_BYPASS_DISABLE,
};
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_rds_mask;
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
+#include <asm/nospec-branch.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
/* A random value per boot for bit slice [12:upper_bit) */
va_align.bits = get_random_int() & va_align.mask;
}
+
+ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+ unsigned int bit;
+
+ switch (c->x86) {
+ case 0x15: bit = 54; break;
+ case 0x16: bit = 33; break;
+ case 0x17: bit = 10; break;
+ default: return;
+ }
+ /*
+ * Try to cache the base value so further operations can
+ * avoid RMW. If that faults, do not enable RDS.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ setup_force_cpu_cap(X86_FEATURE_RDS);
+ setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+ x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+ }
+ }
}
static void early_init_amd(struct cpuinfo_x86 *c)
/* AMD CPUs don't reset SS attributes on SYSRET */
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+ set_cpu_cap(c, X86_FEATURE_RDS);
+ set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+ }
}
#ifdef CONFIG_X86_32
u64 x86_spec_ctrl_priv;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_priv);
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ */
+u64 x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_rds_mask;
+
void __init check_bugs(void)
{
identify_boot_cpu();
#endif
/*
* Read the SPEC_CTRL MSR to account for reserved bits which may
- * have unknown values.
+ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+ * init code as it is not enumerated and depends on the family.
*/
if (boot_cpu_has(X86_FEATURE_IBRS)) {
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+static void x86_amd_rds_enable(void)
+{
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+
+ if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
+
/*
* Disable retpoline and attempt to fall back to another Spectre v2 mitigation.
* If possible, fall back to IBRS and IBPB.
switch (cmd) {
case SPEC_STORE_BYPASS_CMD_AUTO:
+ /*
+ * AMD platforms by default don't need SSB mitigation.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ break;
case SPEC_STORE_BYPASS_CMD_ON:
mode = SPEC_STORE_BYPASS_DISABLE;
break;
x86_spec_ctrl_set(SPEC_CTRL_RDS);
break;
case X86_VENDOR_AMD:
+ x86_amd_rds_enable();
break;
}
}
{
if (boot_cpu_has(X86_FEATURE_IBRS))
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+ x86_amd_rds_enable();
}
#ifdef CONFIG_SYSFS
{ X86_VENDOR_CENTAUR, 5, },
{ X86_VENDOR_INTEL, 5, },
{ X86_VENDOR_NSC, 5, },
+ { X86_VENDOR_AMD, 0x12, },
+ { X86_VENDOR_AMD, 0x11, },
+ { X86_VENDOR_AMD, 0x10, },
+ { X86_VENDOR_AMD, 0xf, },
{ X86_VENDOR_ANY, 4, },
{}
};