x86/speculation: Support per-process SSBD with IBRS
author     Alexandre Chartre <alexandre.chartre@oracle.com>
           Mon, 9 Jul 2018 14:53:05 +0000 (16:53 +0200)
committer  Brian Maly <brian.maly@oracle.com>
           Tue, 7 Aug 2018 17:21:43 +0000 (13:21 -0400)
Currently per-process SSBD can't be used with IBRS because both SSBD
and IBRS have to concurrently modify the spec_ctrl state, which is
stored globally across all CPUs. As a consequence, when IBRS is in use,
Speculative Store Bypass is disabled for all of userspace and can't be
controlled per-process with prctl.
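
For reference, the per-process control in question is the speculation
prctl(2) interface. A minimal userspace sketch (the PR_SPEC_* constants
are the standard ones from <linux/prctl.h>; the program is illustrative
and not part of this patch):

	/* Sketch: force Speculative Store Bypass Disable for this task. */
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0) != 0) {
			perror("PR_SET_SPECULATION_CTRL");
			return 1;
		}

		/* Read back the per-task mitigation state. */
		printf("spec_store_bypass state: 0x%x\n",
		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			     0, 0, 0));
		return 0;
	}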

Commit fabdd62357ac ("x86/speculation: Implement per-cpu IBRS control")
implemented per-cpu IBRS control; a similar change is needed to
maintain part of the spec_ctrl state (x86_spec_ctrl_priv) per-cpu as
well. With the spec_ctrl state maintained per-cpu, per-process SSBD
can be used with IBRS.
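
The effect of the change, sketched with the kernel's percpu accessors
(the helper below is hypothetical, for illustration only; the SPEC_CTRL
bit values match the MSR_IA32_SPEC_CTRL layout): each CPU composes the
kernel-entry IBRS bit with the SSBD bit of whichever task it is
currently running, with no cross-CPU state to race on.

	#include <linux/types.h>
	#include <linux/percpu.h>

	#define SPEC_CTRL_IBRS	(1ULL << 0)	/* set on kernel entry */
	#define SPEC_CTRL_SSBD	(1ULL << 2)	/* per-task store bypass disable */

	DEFINE_PER_CPU(u64, x86_spec_ctrl_priv_cpu);

	/* Hypothetical context-switch helper: purely CPU-local update. */
	static void sketch_update_ssbd(bool task_wants_ssbd)
	{
		u64 val = this_cpu_read(x86_spec_ctrl_priv_cpu);

		if (task_wants_ssbd)
			val |= SPEC_CTRL_SSBD;
		else
			val &= ~SPEC_CTRL_SSBD;

		this_cpu_write(x86_spec_ctrl_priv_cpu, val);
	}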

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
(cherry picked from UEK5 commit 9275aeff3b17bf85c7fe99d98d73b69628c14796)

Orabug: 28354043

[Backport: the assembly code changes in spec_ctrl.h are similar, but
 the code is organized slightly differently on UEK5 and UEK4: on UEK5
 the changes are directly in assembly macros (.macro), while on UEK4
 they are in preprocessor macros (#define) which the assembly macros
 expand.]
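
To illustrate the note, here is roughly what the two organizations look
like, using the wrmsr sequence from this patch (the macro names are
placeholders, not the actual UEK code):

	/* UEK4: the sequence lives in a C preprocessor #define that the
	 * assembly macros expand. */
	#define __ASM_WRITE_SPEC_CTRL			\
		movl $MSR_IA32_SPEC_CTRL, %ecx;		\
		movl $0, %edx;				\
		movl PER_CPU_VAR(x86_spec_ctrl_priv_cpu), %eax; \
		wrmsr;

	/* UEK5: the same sequence written directly as a GNU as .macro. */
	.macro WRITE_SPEC_CTRL
		movl	$MSR_IA32_SPEC_CTRL, %ecx
		movl	$0, %edx
		movl	PER_CPU_VAR(x86_spec_ctrl_priv_cpu), %eax
		wrmsr
	.endm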

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/asm/spec_ctrl.h
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/cpu/scattered.c

diff --git a/arch/x86/include/asm/spec_ctrl.h b/arch/x86/include/asm/spec_ctrl.h
index 656cca673a547e7a984f5d51a41d0ec0e7ec4772..1abaf122a6e8218bd65a606f8d0de9ab9c3007d2 100644
--- a/arch/x86/include/asm/spec_ctrl.h
+++ b/arch/x86/include/asm/spec_ctrl.h
@@ -24,7 +24,7 @@
        pushq %rdx;                             \
        movl $MSR_IA32_SPEC_CTRL, %ecx;         \
        movl $0, %edx;                          \
-       movl x86_spec_ctrl_priv, %eax;          \
+       movl PER_CPU_VAR(x86_spec_ctrl_priv_cpu), %eax; \
        wrmsr;                                  \
        popq %rdx;                              \
        popq %rcx;                              \
@@ -32,7 +32,7 @@
 #define __ASM_ENABLE_IBRS_CLOBBER              \
        movl $MSR_IA32_SPEC_CTRL, %ecx;         \
        movl $0, %edx;                          \
-       movl x86_spec_ctrl_priv, %eax;          \
+       movl PER_CPU_VAR(x86_spec_ctrl_priv_cpu), %eax; \
        wrmsr;
 #define __ASM_DISABLE_IBRS                     \
        pushq %rax;                             \
@@ -208,6 +208,7 @@ ALTERNATIVE __stringify(__ASM_STUFF_RSB), "", X86_FEATURE_STUFF_RSB
 
 /* Defined in bugs_64.c */
 extern u64 x86_spec_ctrl_priv;
+DECLARE_PER_CPU(u64, x86_spec_ctrl_priv_cpu);
 extern u64 x86_spec_ctrl_base;
 
 /*
@@ -235,6 +236,19 @@ extern void unprotected_firmware_end(void);
 
 #define ibrs_inuse             (cpu_ibrs_inuse())
 
+static inline void update_cpu_spec_ctrl(int cpu)
+{
+       per_cpu(x86_spec_ctrl_priv_cpu, cpu) = x86_spec_ctrl_priv;
+}
+
+static inline void update_cpu_spec_ctrl_all(void)
+{
+       int cpu_index;
+
+       for_each_online_cpu(cpu_index)
+               update_cpu_spec_ctrl(cpu_index);
+}
+
 static inline void update_cpu_ibrs(struct cpuinfo_x86 *cpu)
 {
        struct cpuinfo_x86 *cpu_info;
@@ -265,6 +279,8 @@ static inline bool set_ibrs_inuse(void)
                sysctl_ibrs_enabled = true;
                /* When entering kernel */
                x86_spec_ctrl_priv |= SPEC_CTRL_FEATURE_ENABLE_IBRS;
+               /* Update per-cpu spec_ctrl */
+               update_cpu_spec_ctrl_all();
                return true;
        } else {
                return false;
@@ -278,11 +294,11 @@ static inline void clear_ibrs_inuse(void)
        /* Update what sysfs shows. */
        sysctl_ibrs_enabled = false;
        /*
-        * This is stricly not needed as the use_ibrs guards against the
-        * the use of the MSR so these values wouldn't be touched.
-        */
-       x86_spec_ctrl_priv &= ~(SPEC_CTRL_FEATURE_ENABLE_IBRS);
-
+        * This is stricly not needed as the use_ibrs guards against the
+        * the use of the MSR so these values wouldn't be touched.
+        */
+       x86_spec_ctrl_priv &= ~(SPEC_CTRL_FEATURE_ENABLE_IBRS);
+       update_cpu_spec_ctrl_all();
 }
 
 static inline int check_ibrs_inuse(void)
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 632fd6d5808b5d0f452e2a0149323e684443dca4..d83265ce5788d7cda0c964d4c1aef4dec8d6b110 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -135,6 +135,8 @@ static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
  */
 u64 x86_spec_ctrl_priv;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_priv);
+DEFINE_PER_CPU(u64, x86_spec_ctrl_priv_cpu) = 0;
+EXPORT_PER_CPU_SYMBOL(x86_spec_ctrl_priv_cpu);
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -162,6 +164,7 @@ void __init check_bugs(void)
                        x86_spec_ctrl_base &= ~(SPEC_CTRL_IBRS);
                }
                x86_spec_ctrl_priv = x86_spec_ctrl_base;
+               update_cpu_spec_ctrl_all();
        }
 
        /* Allow STIBP in MSR_SPEC_CTRL if supported */
@@ -246,12 +249,12 @@ void x86_spec_ctrl_set(u64 val)
                 */
                if (ssbd_ibrs_selected()) {
                        if (val & SPEC_CTRL_IBRS)
-                               host = x86_spec_ctrl_priv;
+                               host = this_cpu_read(x86_spec_ctrl_priv_cpu);
                        else
                                host = val & ~(SPEC_CTRL_SSBD);
                } else {
                        if (ibrs_inuse)
-                               host = x86_spec_ctrl_priv;
+                               host = this_cpu_read(x86_spec_ctrl_priv_cpu);
                        else
                                host = x86_spec_ctrl_base;
                        host |= val;
@@ -278,7 +281,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                         * Except on IBRS we don't want to use host base value
                         * but rather the privilege value which has IBRS set.
                         */
-                       hostval = x86_spec_ctrl_priv;
+                       hostval = this_cpu_read(x86_spec_ctrl_priv_cpu);
 
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
@@ -775,11 +778,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 
        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
-               /* Choose prctl as the default mode unless IBRS is enabled. */
-               if (spectre_v2_enabled == SPECTRE_V2_IBRS) {
-                       mode = SPEC_STORE_BYPASS_USERSPACE;
-                       break;
-               }
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
@@ -804,23 +802,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                break;
        }
 
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS) {
-               switch (mode) {
-               case SPEC_STORE_BYPASS_SECCOMP:
-               case SPEC_STORE_BYPASS_PRCTL:
-                       /* Not much we can do except switch the mode to userspace. */
-                       pr_info("from '%s' to '%s' as IBRS is enabled\n",
-                               ssb_strings[mode], ssb_strings[SPEC_STORE_BYPASS_USERSPACE]);
-                       mode = SPEC_STORE_BYPASS_USERSPACE;
-                       break;
-               case SPEC_STORE_BYPASS_DISABLE:
-                       /* Need to set the x86_spec_ctrl_mask and friends. */
-                       break;
-               default:
-                       break;
-               }
-       }
-
        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@@ -849,6 +830,8 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                        }
                        else
                                x86_spec_ctrl_priv &= ~(SPEC_CTRL_SSBD);
+
+                       update_cpu_spec_ctrl_all();
                        break;
                case X86_VENDOR_AMD:
                        if (mode == SPEC_STORE_BYPASS_DISABLE)
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index e524417b72607c36e304aa475bd0fc242d270dcd..5c00d82e49d984a9deb269e8d9549c911276fc48 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -205,6 +205,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c,
        if (!xen_pv_domain()) {
                mutex_lock(&spec_ctrl_mutex);
                update_cpu_ibrs(c);
+               update_cpu_spec_ctrl(c->cpu_index);
                mutex_unlock(&spec_ctrl_mutex);
        }
 }