x86/intel/spectre_v4: Keep SPEC_CTRL_SSBD when IBRS is in use
author	Boris Ostrovsky <boris.ostrovsky@oracle.com>
	Wed, 21 Nov 2018 21:15:25 +0000 (16:15 -0500)
committer	Brian Maly <brian.maly@oracle.com>
	Tue, 27 Nov 2018 18:08:29 +0000 (13:08 -0500)
When IBRS mitigations are in use and we are running with prctl or seccomp
SSBD mitigations, we end up not setting the SPEC_CTRL_SSBD bit in
MSR_IA32_SPEC_CTRL in DISABLE_IBRS (which is called, for example, when
returning from a syscall to userspace).
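
In rough terms, the fix keeps a per-CPU copy of the SPEC_CTRL value that
should be written back when IBRS is dropped, and that copy already carries
the task's SSBD bit. Below is a minimal userspace sketch of that idea: the
MSR is modelled as a plain variable, the SPEC_CTRL_* bit positions are the
architectural ones, and everything apart from the variable names taken from
the diff is illustrative rather than actual kernel code.

/*
 * Toy model of the bug and the fix: the per-CPU "restore" value carries
 * the task's SSBD bit, so dropping IBRS no longer clears SPEC_CTRL_SSBD.
 * Everything here is a simplified stand-in, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS  (1ULL << 0)     /* architectural bit 0 */
#define SPEC_CTRL_SSBD  (1ULL << 2)     /* architectural bit 2 */

static uint64_t msr_spec_ctrl;          /* stands in for MSR_IA32_SPEC_CTRL */
static uint64_t x86_spec_ctrl_base;     /* base value, no task-specific bits */
static uint64_t x86_spec_ctrl_restore;  /* per-CPU value to restore on exit */

/* cf. intel_set_ssb_state(): compose base + task SSBD and remember it */
static void set_ssb_state(int ssbd_on)
{
	uint64_t msr = x86_spec_ctrl_base | (ssbd_on ? SPEC_CTRL_SSBD : 0);

	x86_spec_ctrl_restore = msr;    /* the bookkeeping this patch adds */
	msr_spec_ctrl = msr;
}

/*
 * cf. DISABLE_IBRS on return to userspace: the old code wrote
 * x86_spec_ctrl_base here, silently dropping SPEC_CTRL_SSBD.
 */
static void disable_ibrs(void)
{
	msr_spec_ctrl = x86_spec_ctrl_restore;
}

int main(void)
{
	set_ssb_state(1);                   /* task enabled SSBD via prctl/seccomp */
	msr_spec_ctrl |= SPEC_CTRL_IBRS;    /* simplified ENABLE_IBRS on kernel entry */
	disable_ibrs();                     /* syscall return path */
	printf("SSBD still set: %d\n", !!(msr_spec_ctrl & SPEC_CTRL_SSBD));
	return 0;
}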

Orabug: 28814570

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/asm/spec_ctrl.h
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/process.c

arch/x86/include/asm/spec_ctrl.h
index 3c1f6df899cdc9362e807e85f4aefe8f797e7ee2..68ff83e2924771f2d7903c3d76641185badd7cdc 100644 (file)
@@ -40,7 +40,7 @@
        pushq %rdx;                             \
        movl $MSR_IA32_SPEC_CTRL, %ecx;         \
        movl $0, %edx;                          \
-       movl x86_spec_ctrl_base, %eax;          \
+       movl PER_CPU_VAR(x86_spec_ctrl_restore), %eax;          \
        wrmsr;                                  \
        popq %rdx;                              \
        popq %rcx;                              \
        testl   $SPEC_CTRL_IBRS_INUSE, PER_CPU_VAR(cpu_ibrs)
        jz      13f
 
-       testl   $SPEC_CTRL_FEATURE_ENABLE_IBRS, \save_reg
-       jnz     13f
+       cmp     \save_reg, PER_CPU_VAR(x86_spec_ctrl_priv_cpu)
+       je      13f
 
        movl    $MSR_IA32_SPEC_CTRL, %ecx
        movl    $0, %edx
@@ -209,6 +209,7 @@ ALTERNATIVE __stringify(__ASM_STUFF_RSB), "", X86_FEATURE_STUFF_RSB
 /* Defined in bugs_64.c */
 extern u64 x86_spec_ctrl_priv;
 DECLARE_PER_CPU(u64, x86_spec_ctrl_priv_cpu);
+DECLARE_PER_CPU(u64, x86_spec_ctrl_restore);
 extern u64 x86_spec_ctrl_base;
 
 /*
@@ -241,6 +242,7 @@ DECLARE_STATIC_KEY_FALSE(retpoline_enabled_key);
 static inline void update_cpu_spec_ctrl(int cpu)
 {
        per_cpu(x86_spec_ctrl_priv_cpu, cpu) = x86_spec_ctrl_priv;
+       per_cpu(x86_spec_ctrl_restore, cpu) = x86_spec_ctrl_base;
 }
 
 static inline void update_cpu_spec_ctrl_all(void)
arch/x86/kernel/cpu/bugs_64.c
index ad562d8e91d0fb592a9da7559208a32528309e6a..6827e4fb105c64d832d42efdfb7b75b1ca77bbd8 100644 (file)
@@ -153,6 +153,8 @@ u64 x86_spec_ctrl_priv;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_priv);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_priv_cpu) = 0;
 EXPORT_PER_CPU_SYMBOL(x86_spec_ctrl_priv_cpu);
+DEFINE_PER_CPU(u64, x86_spec_ctrl_restore) = 0;
+EXPORT_PER_CPU_SYMBOL(x86_spec_ctrl_restore);
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
arch/x86/kernel/process.c
index 59cda6f04066cb079d9306362811c06780645fe7..5c73161d3da7a4b69aa8a35636f851aaea91e082 100644 (file)
@@ -352,6 +352,7 @@ static __always_inline void intel_set_ssb_state(unsigned long tifn)
 {
        u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
 
+       this_cpu_write(x86_spec_ctrl_restore, msr);
        wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }