pushq %rdx; \
movl $MSR_IA32_SPEC_CTRL, %ecx; \
movl $0, %edx; \
- movl x86_spec_ctrl_priv, %eax; \
+ movl PER_CPU_VAR(x86_spec_ctrl_priv_cpu), %eax; \
wrmsr; \
popq %rdx; \
popq %rcx; \
#define __ASM_ENABLE_IBRS_CLOBBER \
movl $MSR_IA32_SPEC_CTRL, %ecx; \
movl $0, %edx; \
- movl x86_spec_ctrl_priv, %eax; \
+ movl PER_CPU_VAR(x86_spec_ctrl_priv_cpu), %eax; \
wrmsr;
#define __ASM_DISABLE_IBRS \
pushq %rax; \
/* Defined in bugs_64.c */
extern u64 x86_spec_ctrl_priv;
+DECLARE_PER_CPU(u64, x86_spec_ctrl_priv_cpu);
extern u64 x86_spec_ctrl_base;
/*
#define ibrs_inuse (cpu_ibrs_inuse())
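+/*
+ * Keep the per-cpu copy of x86_spec_ctrl_priv in sync; the entry asm
+ * reads it via PER_CPU_VAR(x86_spec_ctrl_priv_cpu).
+ */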
+static inline void update_cpu_spec_ctrl(int cpu)
+{
+ per_cpu(x86_spec_ctrl_priv_cpu, cpu) = x86_spec_ctrl_priv;
+}
+
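+/* Refresh the per-cpu copy on all online CPUs after the global value changes. */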
+static inline void update_cpu_spec_ctrl_all(void)
+{
+ int cpu_index;
+
+ for_each_online_cpu(cpu_index)
+ update_cpu_spec_ctrl(cpu_index);
+}
+
static inline void update_cpu_ibrs(struct cpuinfo_x86 *cpu)
{
struct cpuinfo_x86 *cpu_info;
sysctl_ibrs_enabled = true;
/* When entering kernel */
x86_spec_ctrl_priv |= SPEC_CTRL_FEATURE_ENABLE_IBRS;
+ /* Update per-cpu spec_ctrl */
+ update_cpu_spec_ctrl_all();
return true;
} else {
return false;
/* Update what sysfs shows. */
sysctl_ibrs_enabled = false;
/*
- * This is stricly not needed as the use_ibrs guards against the
- * the use of the MSR so these values wouldn't be touched.
- */
- x86_spec_ctrl_priv &= ~(SPEC_CTRL_FEATURE_ENABLE_IBRS);
-
+	 * This is strictly not needed as the use_ibrs guards against
+	 * the use of the MSR so these values wouldn't be touched.
+ */
+ x86_spec_ctrl_priv &= ~(SPEC_CTRL_FEATURE_ENABLE_IBRS);
+ update_cpu_spec_ctrl_all();
}
static inline int check_ibrs_inuse(void)
*/
u64 x86_spec_ctrl_priv;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_priv);
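+/* Per-cpu copy of x86_spec_ctrl_priv, read on kernel entry when IBRS is in use. */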
+DEFINE_PER_CPU(u64, x86_spec_ctrl_priv_cpu) = 0;
+EXPORT_PER_CPU_SYMBOL(x86_spec_ctrl_priv_cpu);
/*
* AMD specific MSR info for Speculative Store Bypass control.
x86_spec_ctrl_base &= ~(SPEC_CTRL_IBRS);
}
x86_spec_ctrl_priv = x86_spec_ctrl_base;
+ update_cpu_spec_ctrl_all();
}
/* Allow STIBP in MSR_SPEC_CTRL if supported */
*/
if (ssbd_ibrs_selected()) {
if (val & SPEC_CTRL_IBRS)
- host = x86_spec_ctrl_priv;
+ host = this_cpu_read(x86_spec_ctrl_priv_cpu);
else
host = val & ~(SPEC_CTRL_SSBD);
} else {
if (ibrs_inuse)
- host = x86_spec_ctrl_priv;
+ host = this_cpu_read(x86_spec_ctrl_priv_cpu);
else
host = x86_spec_ctrl_base;
host |= val;
* Except on IBRS we don't want to use host base value
* but rather the privilege value which has IBRS set.
*/
- hostval = x86_spec_ctrl_priv;
+	hostval = this_cpu_read(x86_spec_ctrl_priv_cpu);
guestval = hostval & ~x86_spec_ctrl_mask;
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
switch (cmd) {
case SPEC_STORE_BYPASS_CMD_AUTO:
- /* Choose prctl as the default mode unless IBRS is enabled. */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS) {
- mode = SPEC_STORE_BYPASS_USERSPACE;
- break;
- }
case SPEC_STORE_BYPASS_CMD_SECCOMP:
/*
* Choose prctl+seccomp as the default mode if seccomp is
break;
}
- if (spectre_v2_enabled == SPECTRE_V2_IBRS) {
- switch (mode) {
- case SPEC_STORE_BYPASS_SECCOMP:
- case SPEC_STORE_BYPASS_PRCTL:
- /* Not much we can do except switch the mode to userspace. */
- pr_info("from '%s' to '%s' as IBRS is enabled\n",
- ssb_strings[mode], ssb_strings[SPEC_STORE_BYPASS_USERSPACE]);
- mode = SPEC_STORE_BYPASS_USERSPACE;
- break;
- case SPEC_STORE_BYPASS_DISABLE:
- /* Need to set the x86_spec_ctrl_mask and friends. */
- break;
- default:
- break;
- }
- }
-
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
}
else
x86_spec_ctrl_priv &= ~(SPEC_CTRL_SSBD);
+
+ update_cpu_spec_ctrl_all();
break;
case X86_VENDOR_AMD:
if (mode == SPEC_STORE_BYPASS_DISABLE)