x86/bugs: Report Intel retbleed vulnerability
author Peter Zijlstra <peterz@infradead.org>
Fri, 24 Jun 2022 11:48:58 +0000 (13:48 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Jul 2022 09:26:43 +0000 (11:26 +0200)
commit 6ad0ad2bf8a67e27d1f9d006a1dabb0e1c360cc3 upstream.

Skylake suffers from RSB underflow speculation issues; report this
vulnerability and its mitigation (spectre_v2=ibrs).
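
For context: the mitigation state ends up being reported through the
existing sysfs vulnerabilities interface. A minimal userspace sketch
(assuming the /sys/devices/system/cpu/vulnerabilities/retbleed file
introduced by the retbleed series) that reads the reported string:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/retbleed", "r");

	if (!f) {
		perror("retbleed");	/* file absent on older kernels */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Mitigation: IBRS" */
	fclose(f);
	return 0;
}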

  [jpoimboe: cleanups, eibrs]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 96973d19797233974130b4bd3c5d18a8e10217eb..77a55777e002ba5efc457de8fe2944191fd438dc 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -91,6 +91,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               BIT(0)  /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              BIT(1)  /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA                  BIT(2)  /* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3)  /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO                        BIT(4)  /*
                                                 * Not susceptible to Speculative Store Bypass
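
The new ARCH_CAP_RSBA bit only has meaning when the CPU enumerates
MSR_IA32_ARCH_CAPABILITIES at all. A minimal kernel-context sketch of
testing it (rdmsrl() and boot_cpu_has() are existing helpers; treating a
missing MSR as all-zero capabilities mirrors what the common code does):

	u64 ia32_cap = 0;

	/* The MSR exists only when CPUID enumerates ARCH_CAPABILITIES. */
	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	/* RSBA: RETs may be predicted by alternative (trainable) predictors. */
	if (ia32_cap & ARCH_CAP_RSBA)
		setup_force_cpu_bug(X86_BUG_RETBLEED);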
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 58f48f93e8cc556f0887c8f6f43552ebba40f37f..a2423d4ed29350a56009a1bb53033d081d6b3a1a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -746,12 +746,17 @@ static int __init nospectre_v1_cmdline(char *str)
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "RETBleed: " fmt
 
 enum retbleed_mitigation {
        RETBLEED_MITIGATION_NONE,
        RETBLEED_MITIGATION_UNRET,
+       RETBLEED_MITIGATION_IBRS,
+       RETBLEED_MITIGATION_EIBRS,
 };
 
 enum retbleed_mitigation_cmd {
@@ -763,6 +768,8 @@ enum retbleed_mitigation_cmd {
 const char * const retbleed_strings[] = {
        [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
        [RETBLEED_MITIGATION_UNRET]     = "Mitigation: untrained return thunk",
+       [RETBLEED_MITIGATION_IBRS]      = "Mitigation: IBRS",
+       [RETBLEED_MITIGATION_EIBRS]     = "Mitigation: Enhanced IBRS",
 };
 
 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
@@ -805,6 +812,7 @@ early_param("retbleed", retbleed_parse_cmdline);
 
 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
 #define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
 
 static void __init retbleed_select_mitigation(void)
 {
@@ -821,12 +829,15 @@ static void __init retbleed_select_mitigation(void)
 
        case RETBLEED_CMD_AUTO:
        default:
-               if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
-                       break;
-
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
                    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                        retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+
+               /*
+                * The Intel mitigation (IBRS) was already selected in
+                * spectre_v2_select_mitigation().
+                */
+
                break;
        }
 
@@ -856,15 +867,31 @@ static void __init retbleed_select_mitigation(void)
                break;
        }
 
+       /*
+        * Let IBRS trump all on Intel without affecting the effects of the
+        * retbleed= cmdline option.
+        */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+               switch (spectre_v2_enabled) {
+               case SPECTRE_V2_IBRS:
+                       retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+                       break;
+               case SPECTRE_V2_EIBRS:
+               case SPECTRE_V2_EIBRS_RETPOLINE:
+               case SPECTRE_V2_EIBRS_LFENCE:
+                       retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+                       break;
+               default:
+                       pr_err(RETBLEED_INTEL_MSG);
+               }
+       }
+
        pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-       SPECTRE_V2_NONE;
-
 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
        SPECTRE_V2_USER_NONE;
 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
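
With spectre_v2_enabled now declared ahead of retbleed_select_mitigation(),
reporting reduces to a table lookup. A sketch of the corresponding sysfs
show helper (the function in the full series also carries extra warning
logic for the UNRET case, so take this as illustrative only):

static ssize_t retbleed_show_state(char *buf)
{
	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}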
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5214c4bba33b49768630c4705698b44ac867970e..ab68595e217e8eee6cc5c61e64cd5c5830710d37 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1124,24 +1124,24 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(BROADWELL_X,     X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_X,       BIT(3) | BIT(4) | BIT(6) |
-                                               BIT(7) | BIT(0xB),              MMIO),
-       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO),
+                                               BIT(7) | BIT(0xB),              MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x9, 0xC),        SRBDS | MMIO),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x9, 0xC),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x0, 0x8),        SRBDS),
-       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x9, 0xD),        SRBDS | MMIO),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x9, 0xD),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x0, 0x8),        SRBDS),
-       VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPINGS(0x5, 0x5),        MMIO | MMIO_SBDS),
+       VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPINGS(0x5, 0x5),        MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ICELAKE_D,       X86_STEPPINGS(0x1, 0x1),        MMIO),
        VULNBL_INTEL_STEPPINGS(ICELAKE_X,       X86_STEPPINGS(0x4, 0x6),        MMIO),
-       VULNBL_INTEL_STEPPINGS(COMETLAKE,       BIT(2) | BIT(3) | BIT(5),       MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO),
-       VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPINGS(0x1, 0x1),        MMIO),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE,       BIT(2) | BIT(3) | BIT(5),       MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPINGS(0x1, 0x1),        MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPINGS(0x0, 0x0),        MMIO | MMIO_SBDS),
@@ -1251,7 +1251,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
            !arch_cap_mmio_immune(ia32_cap))
                setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 
-       if (cpu_matches(cpu_vuln_blacklist, RETBLEED))
+       if ((cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)))
                setup_force_cpu_bug(X86_BUG_RETBLEED);
 
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
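
Taken together, a CPU is flagged for RETBLEED either via the
model/stepping blacklist above or via the RSBA capability bit. Restated
compactly (names as in the hunk above; the local is illustrative):

	/* Either source is sufficient to force the bug bit. */
	bool vulnerable = cpu_matches(cpu_vuln_blacklist, RETBLEED) ||
			  (ia32_cap & ARCH_CAP_RSBA);

	if (vulnerable)
		setup_force_cpu_bug(X86_BUG_RETBLEED);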