www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
x86/bugs: ssbd_ibrs_selected called prematurely
authorDaniel Jordan <daniel.m.jordan@oracle.com>
Wed, 18 Jul 2018 16:23:25 +0000 (09:23 -0700)
committerBrian Maly <brian.maly@oracle.com>
Mon, 22 Oct 2018 17:05:42 +0000 (13:05 -0400)
spectre_v2_select_mitigation considers the SSB mitigation that's been
selected in its own decision; specifically, it calls ssbd_ibrs_selected
when deciding whether to use IBRS.  The problem is, the SSB mitigation
hasn't been chosen yet, so ssbd_ibrs_selected will always return the
same result, possibly the wrong one.

Address this by splitting SSB initialization into two phases.  The
first, ssb_select_mitigation, picks the SSB mitigation mode, getting far
enough for spectre_v2_select_mitigation's needs, and the second,
ssb_init, sets up the necessary SSB state based on the mode.  The second
phase is necessary because it relies on the outcome of
spectre_v2_select_mitigation.

Fixes: 648702f587df ("x86/bugs/IBRS: Disable SSB (RDS) if IBRS is selected for spectre_v2.")
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
(cherry picked from commit 86a51ae1ce42d9f76e09e3c87c20c3625c6a1a6c)

Orabug: 28788839

Signed-off-by: Anjali Kulkarni <anjali.k.kulkarni@oracle.com>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/kernel/cpu/bugs_64.c

index 07eefe862036bf48b729351e40c100cc636d64e9..809f85a36be9bb917d2dab6aee7138d3cacc8c92 100644 (file)
@@ -116,10 +116,13 @@ int __init spectre_v2_heuristics_setup(char *p)
 __setup("spectre_v2_heuristics=", spectre_v2_heuristics_setup);
 
 static void __init spectre_v2_select_mitigation(void);
-static void __init ssb_select_mitigation(void);
+static enum ssb_mitigation __init ssb_select_mitigation(void);
+static void __init ssb_init(void);
 static bool ssbd_ibrs_selected(void);
 static void __init l1tf_select_mitigation(void);
 
+static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+
 /*
  * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
@@ -182,14 +185,17 @@ void __init check_bugs(void)
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 
-       /* Select the proper spectre mitigation before patching alternatives */
-       spectre_v2_select_mitigation();
-
        /*
         * Select proper mitigation for any exposure to the Speculative Store
-        * Bypass vulnerability.
+        * Bypass vulnerability.  Required by spectre_v2_select_mitigation.
         */
-       ssb_select_mitigation();
+       ssb_mode = ssb_select_mitigation();
+
+       /* Select the proper spectre mitigation before patching alternatives */
+       spectre_v2_select_mitigation();
+
+       /* Relies on the result of spectre_v2_select_mitigation. */
+       ssb_init();
 
        l1tf_select_mitigation();
 
@@ -696,8 +702,6 @@ out:
 
 #define pr_fmt(fmt)    "Speculative Store Bypass: " fmt
 
-static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
-
 bool ssbd_ibrs_selected(void)
 {
        return (ssb_mode == SPEC_STORE_BYPASS_USERSPACE);
@@ -764,7 +768,7 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
        return cmd;
 }
 
-static enum ssb_mitigation __init __ssb_select_mitigation(void)
+static enum ssb_mitigation __init ssb_select_mitigation(void)
 {
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;
@@ -804,17 +808,22 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                break;
        }
 
+       return mode;
+}
+
+static void __init ssb_init(void)
+{
        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
-       if (mode != SPEC_STORE_BYPASS_DISABLE)
+       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
 
-       if (mode == SPEC_STORE_BYPASS_DISABLE ||
-           mode == SPEC_STORE_BYPASS_USERSPACE) {
+       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE ||
+           ssb_mode == SPEC_STORE_BYPASS_USERSPACE) {
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
                 * a completely different MSR and bit dependent on family.
@@ -824,7 +833,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 
-                       if (mode == SPEC_STORE_BYPASS_DISABLE) {
+                       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
                                x86_spec_ctrl_set(SPEC_CTRL_SSBD);
                                if (spectre_v2_enabled == SPECTRE_V2_IBRS) {
                                        x86_spec_ctrl_priv |= SPEC_CTRL_SSBD;
@@ -836,18 +845,11 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                        update_cpu_spec_ctrl_all();
                        break;
                case X86_VENDOR_AMD:
-                       if (mode == SPEC_STORE_BYPASS_DISABLE)
+                       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                                x86_amd_ssbd_enable();
                        break;
                }
        }
-       return mode;
-}
-
-static void ssb_select_mitigation(void)
-{
-       ssb_mode = __ssb_select_mitigation();
-
        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
 }