x86/bugs/IBRS: Disable SSB (RDS) if IBRS is selected for spectre_v2.
author Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Fri, 4 May 2018 06:15:44 +0000 (06:15 +0000)
committer Brian Maly <brian.maly@oracle.com>
Mon, 21 May 2018 22:04:53 +0000 (18:04 -0400)
If spec_store_bypass_disable=userspace is selected we want to frob
the SPEC_CTRL MSR on every userspace entrance (disable memory
disambiguation), and also on every kernel entrance (enable memory
disambiguation). However, we have to be careful, as frobbing the MSR
while retpoline is also enabled slows the machine down even further.
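
Conceptually, the transitions look roughly like this (illustrative
sketch only; the real MSR writes live in the entry code, which this
patch does not touch):

	/* kernel entrance: IBRS on, RDS off - memory disambiguation on */
	wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
	...
	/* return to userspace: IBRS off, RDS on - memory disambiguation off */
	wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_RDS);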

Therefore, if possible, swap over to using the SPEC_CTRL MSR (IBRS) on
every kernel entrance instead of using retpoline.

Naturally this heuristic is controlled by various knobs.

To summarize: if "spectre_v2=retpoline spec_store_bypass_disable=userspace"
is set, then we will switch the spectre_v2 mitigation over to IBRS.
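
For example (illustrative command lines; spectre_v2_heuristics= is the
heuristic knob extended by this patch):

	spectre_v2=retpoline spec_store_bypass_disable=userspace
		-> effective mitigation: IBRS
	spectre_v2=retpoline spec_store_bypass_disable=userspace spectre_v2_heuristics=rds=off
		-> the retpoline->IBRS switch is not performed (see note *1)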

This table may explain this better:
effect    | spectre_v2  | spec_store_bypass_disable | remark
==========+=============+===========================+======
IBRS      | ibrs        | userspace                 |
IBRS      | auto        | userspace                 | *1 *2
IBRS      | retpoline   | userspace                 | *1
IBRS      | ibrs        | boot                      |
retpoline | auto        | boot                      |
retpoline | retpoline   | boot                      |
retpoline | auto        | auto                      |

*1: If spectre_v2_heuristics=off or spectre_v2_heuristics=rds=off
is selected, then the spec_store_bypass_disable=userspace parameter
is not followed and the effect is that both retpoline and IBRS are
enabled in the kernel.

*2: If we run on Skylake+, 'spec_store_bypass_disable=auto' will
disable retpoline and enable IBRS. If not on Skylake+, then
retpoline and IBRS are both enabled.
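
The resulting decision in spectre_v2_select_mitigation() is roughly
the following (simplified from the hunk below):

	if (!retp_compiler() /* prefer IBRS over minimal ASM */ ||
	    (retp_compiler() && !retpoline_selected(cmd) &&
	     ((is_skylake_era() && use_ibrs_on_skylake) ||
	      (rds_ibrs_selected() && use_ibrs_with_rds))))
		mode = ibrs_select();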

OraBug: 28041771
CVE: CVE-2018-3639

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
Documentation/kernel-parameters.txt
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs_64.c

index 2b1a2bd170a9522882a60c138adf32cebc09fa59..f5eeb7ae5b55fff3e4f03b86000309e7ed68eced 100644 (file)
@@ -3523,6 +3523,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                                 mitigation if a module is
                                                 loaded that was not compiled
                                                 with retpoline.
+                       rds=off         - do not activate the Speculative Store Bypass
+                                          mitigation if doing IBRS.
 
        spec_store_bypass_disable=
                        [HW] Control Speculative Store Bypass (SSB) Disable mitigation
@@ -3554,6 +3556,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                 via prctl. Speculative Store Bypass is enabled
                                 for a process by default. The state of the control
                                 is inherited on fork.
+                       userspace - Disable Speculative Store Bypass when entering
+                                userspace.
 
                        Not specifying this option is equivalent to
                        spec_store_bypass_disable=auto.
index f6474ab127d4afdd266049e66b960b03e6de2067..434a7807b80a0406e37722bdbf98a7839cf3ada9 100644 (file)
@@ -188,6 +188,7 @@ enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
        SPEC_STORE_BYPASS_DISABLE,
        SPEC_STORE_BYPASS_PRCTL,
+       SPEC_STORE_BYPASS_USERSPACE,
 };
 
 extern char __indirect_thunk_start[];
index ca8aad8fb3d47b7dfe84bc793d562f756ab498bf..60e2ecd3cabea98aa989f3a172efc21d8b5c2253 100644 (file)
@@ -47,6 +47,8 @@ EXPORT_SYMBOL(spec_ctrl_mutex);
 bool use_ibrs_on_skylake = true;
 EXPORT_SYMBOL(use_ibrs_on_skylake);
 
+bool use_ibrs_with_rds = true;
+
 /*
  * retpoline_fallback flags:
  * SPEC_CTRL_USE_RETPOLINE_FALLBACK    pick retpoline fallback mitigation
@@ -63,6 +65,7 @@ int __init spectre_v2_heuristics_setup(char *p)
                /* Disable all heuristics. */
                if (!strncmp(p, "off", 3)) {
                        use_ibrs_on_skylake = false;
+                       use_ibrs_with_rds = false;
                        clear_retpoline_fallback();
                        break;
                }
@@ -86,6 +89,16 @@ int __init spectre_v2_heuristics_setup(char *p)
                        if (!strncmp(p, "off", 3))
                                clear_retpoline_fallback();
                }
+               len = strlen("rds");
+               if (!strncmp(p, "rds", len)) {
+                       p += len;
+                       if (*p == '=')
+                               ++p;
+                       if (*p == '\0')
+                               break;
+                       if (!strncmp(p, "off", 3))
+                               use_ibrs_with_rds = false;
+               }
 
                p = strpbrk(p, ",");
                if (!p)
@@ -98,6 +111,7 @@ __setup("spectre_v2_heuristics=", spectre_v2_heuristics_setup);
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
+static bool rds_ibrs_selected(void);
 
 /*
  * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -223,14 +237,18 @@ void x86_spec_ctrl_set(u64 val)
                /*
                 * Only two states are allowed - with IBRS or without.
                 */
-               if (check_ibrs_inuse()) {
+               if (rds_ibrs_selected()) {
                        if (val & SPEC_CTRL_IBRS)
                                host = x86_spec_ctrl_priv;
                        else
-                               host = val;
-               } else
-                       host = x86_spec_ctrl_base | val;
-
+                               host = val & ~(SPEC_CTRL_RDS);
+               } else {
+                       if (ibrs_inuse)
+                               host = x86_spec_ctrl_priv;
+                       else
+                               host = x86_spec_ctrl_base;
+                       host |= val;
+               }
                wrmsrl(MSR_IA32_SPEC_CTRL, host);
        }
 }
@@ -587,7 +605,8 @@ retpoline_auto:
                         */
                        if (!retp_compiler() /* prefer IBRS over minimal ASM */ ||
                            (retp_compiler() && !retpoline_selected(cmd) &&
-                            is_skylake_era() && use_ibrs_on_skylake)) {
+                            ((is_skylake_era() && use_ibrs_on_skylake) ||
+                             (rds_ibrs_selected() && use_ibrs_with_rds)))) {
                                /* Start the engine! */
                                mode = ibrs_select();
                                if (mode == SPECTRE_V2_IBRS)
@@ -643,18 +662,25 @@ out:
 
 static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
 
+bool rds_ibrs_selected(void)
+{
+       return (ssb_mode == SPEC_STORE_BYPASS_USERSPACE);
+}
+
 /* The kernel command line selection */
 enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
+       SPEC_STORE_BYPASS_CMD_USERSPACE,
 };
 
 static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
-       [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl"
+       [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
+       [SPEC_STORE_BYPASS_USERSPACE]   = "Mitigation: Speculative Store Bypass disabled for userspace"
 };
 
 static const struct {
@@ -665,6 +691,8 @@ static const struct {
        { "on",         SPEC_STORE_BYPASS_CMD_ON    }, /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE  }, /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+       { "userspace",  SPEC_STORE_BYPASS_CMD_USERSPACE }, /* Disable Speculative Store Bypass for userspace */
+
 };
 
 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
@@ -714,8 +742,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 
        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
-               /* Choose prctl as the default mode */
-               mode = SPEC_STORE_BYPASS_PRCTL;
+               /* Choose prctl as the default mode unless IBRS is enabled. */
+               if (spectre_v2_enabled == SPECTRE_V2_IBRS)
+                       mode = SPEC_STORE_BYPASS_USERSPACE;
+               else
+                       mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
@@ -723,6 +754,10 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
+       case SPEC_STORE_BYPASS_CMD_USERSPACE:
+               if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+                       mode = SPEC_STORE_BYPASS_USERSPACE;
+               break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }
@@ -733,8 +768,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
         *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
-       if (mode != SPEC_STORE_BYPASS_DISABLE) {
+       if (mode != SPEC_STORE_BYPASS_DISABLE)
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+
+       if (mode == SPEC_STORE_BYPASS_DISABLE ||
+           mode == SPEC_STORE_BYPASS_USERSPACE) {
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
                 * a completely different MSR and bit dependent on family.
@@ -742,11 +780,15 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_RDS;
-                       x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
-                       x86_spec_ctrl_set(SPEC_CTRL_RDS);
+                       if (mode == SPEC_STORE_BYPASS_DISABLE) {
+                               x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+                               x86_spec_ctrl_set(SPEC_CTRL_RDS);
+                       } else
+                               x86_spec_ctrl_priv &= ~(SPEC_CTRL_RDS);
                        break;
                case X86_VENDOR_AMD:
-                       x86_amd_rds_enable();
+                       if (mode == SPEC_STORE_BYPASS_DISABLE)
+                               x86_amd_rds_enable();
                        break;
                }
        }
@@ -784,6 +826,7 @@ static int ssb_prctl_set(unsigned long ctrl)
 static int ssb_prctl_get(void)
 {
        switch (ssb_mode) {
+       case SPEC_STORE_BYPASS_USERSPACE:
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_PRCTL:
@@ -822,7 +865,7 @@ int arch_prctl_spec_ctrl_get(unsigned long which)
 
 void x86_spec_ctrl_setup_ap(void)
 {
-       if (boot_cpu_has(X86_FEATURE_IBRS))
+       if (boot_cpu_has(X86_FEATURE_IBRS) && ssb_mode != SPEC_STORE_BYPASS_USERSPACE)
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)