x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
Author:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
AuthorDate: Thu, 26 Apr 2018 02:04:24 +0000 (22:04 -0400)
Commit:     Brian Maly <brian.maly@oracle.com>
CommitDate: Mon, 21 May 2018 22:03:52 +0000 (18:03 -0400)
AMD does not need the Speculative Store Bypass mitigation to be enabled
by default.

The means to control it are already available: the mitigation can be
enabled via MSR C001_1020, and each family uses a different bit in that
MSR for this.

[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
        into the bugs code as that's the right thing to do and also required
        to prepare for dynamic enable/disable ]

OraBug: 28041771
CVE: CVE-2018-3639

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 764f3c21588a059cd783c6ba0734d4db2d72822d)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Conflicts:
	arch/x86/include/asm/cpufeatures.h
	arch/x86/kernel/cpu/amd.c
	arch/x86/kernel/cpu/bugs.c
	arch/x86/kernel/cpu/common.c

Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/cpu/common.c
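
For context before the diffs: the per-family control bit in MSR C001_1020
(MSR_AMD64_LS_CFG) that this patch programs can also be inspected from
userspace through the msr driver. A minimal sketch, assuming the msr module
is loaded (/dev/cpu/0/msr), root privileges, and the CPU family filled in by
hand; the bit numbers are taken from the bsp_init_amd() hunk below:

    /* Sketch: read MSR C001_1020 and report the SSB-disable (RDS) bit. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_AMD64_LS_CFG 0xc0011020

    int main(void)
    {
            int family = 0x17;  /* assumption: set to your family (0x15/0x16/0x17) */
            int bit, fd;
            uint64_t val;

            switch (family) {   /* same mapping as bsp_init_amd() below */
            case 0x15: bit = 54; break;
            case 0x16: bit = 33; break;
            case 0x17: bit = 10; break;
            default: return 1;
            }
            fd = open("/dev/cpu/0/msr", O_RDONLY);
            if (fd < 0 || pread(fd, &val, sizeof(val), MSR_AMD64_LS_CFG) != sizeof(val))
                    return 1;
            printf("LS_CFG = %#jx, RDS bit %d is %s\n",
                   (uintmax_t)val, bit, (val >> bit) & 1 ? "set" : "clear");
            close(fd);
            return 0;
    }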

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 030f5b99c88637c717774e61b47529b6d0273921..62e963c8f3b60fab5c33947a91bf0a2f7e336791 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
 #define X86_FEATURE_HWP_EPP    ( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AMD_RDS    ( 7*32+18) /* "" AMD RDS implementation */
 #define X86_FEATURE_RSB_CTXSW  ( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_IBRS       ( 7*32+20) /* Control Speculation Control */
 #define X86_FEATURE_STIBP      ( 7*32+21) /* Single Thread Indirect Branch Predictors */
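
A note on the new flag above: the leading "" in its comment follows the
cpufeatures convention that keeps the name out of /proc/cpuinfo. The bit
itself stays testable from kernel code, which is exactly how the bugs code
below guards the MSR write:

    if (boot_cpu_has(X86_FEATURE_AMD_RDS))  /* LS_CFG mechanism usable */
            wrmsrl(MSR_AMD64_LS_CFG,
                   x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask);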
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 026b94603c01c2dca786253b213787981039a010..31947522ca6ac90e3efee8f1ba82203b55921bc4 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -199,6 +199,10 @@ enum ssb_mitigation {
        SPEC_STORE_BYPASS_DISABLE,
 };
 
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_rds_mask;
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5d9632f267a7369f6fccaf9163204a37522a3521..0cf61578649ebc6e0344007f4798808e3097b61e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/nospec-branch.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 
@@ -493,6 +494,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                /* A random value per boot for bit slice [12:upper_bit) */
                va_align.bits = get_random_int() & va_align.mask;
        }
+
+       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+               unsigned int bit;
+
+               switch (c->x86) {
+               case 0x15: bit = 54; break;
+               case 0x16: bit = 33; break;
+               case 0x17: bit = 10; break;
+               default: return;
+               }
+               /*
+                * Try to cache the base value so further operations can
+                * avoid RMW. If that faults, do not enable RDS.
+                */
+               if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_RDS);
+                       setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+                       x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+               }
+       }
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
@@ -732,6 +753,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 
        /* AMD CPUs don't reset SS attributes on SYSRET */
        set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+       if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+               set_cpu_cap(c, X86_FEATURE_RDS);
+               set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+       }
 }
 
 #ifdef CONFIG_X86_32
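
The tglx note in the message anticipates dynamic enable/disable: because
bsp_init_amd() caches the base value of LS_CFG, a later toggle needs only a
single WRMSR rather than a read-modify-write. A sketch of what such a toggle
could look like (hypothetical helper, not part of this patch):

    /* Hypothetical: flip RDS using the cached base; no RDMSR on this path. */
    static void x86_amd_rds_set(bool enable)
    {
            u64 msrval = x86_amd_ls_cfg_base;

            if (enable)
                    msrval |= x86_amd_ls_cfg_rds_mask;
            wrmsrl(MSR_AMD64_LS_CFG, msrval);
    }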
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 2299a4c89dae88b6ec4555fbe46b97c4a9ebf4ec..0b32cffae91a1955443c5bf3e107854168a8dad4 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -117,6 +117,13 @@ static u64 x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
 u64 x86_spec_ctrl_priv;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_priv);
 
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ */
+u64 x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_rds_mask;
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -126,7 +133,8 @@ void __init check_bugs(void)
 #endif
        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
-        * have unknown values.
+        * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+        * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -252,6 +260,15 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
+static void x86_amd_rds_enable(void)
+{
+       u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+
+       if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+               wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
+
+
 /*
  * Disable retpoline and attempt to fall back to another Spectre v2 mitigation.
  * If possible, fall back to IBRS and IBPB.
@@ -667,6 +684,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 
        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
+               /*
+                * AMD platforms by default don't need SSB mitigation.
+                */
+               if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+                       break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
@@ -693,6 +715,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
                        x86_spec_ctrl_set(SPEC_CTRL_RDS);
                        break;
                case X86_VENDOR_AMD:
+                       x86_amd_rds_enable();
                        break;
                }
        }
@@ -713,6 +736,9 @@ void x86_spec_ctrl_setup_ap(void)
 {
        if (boot_cpu_has(X86_FEATURE_IBRS))
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+               x86_amd_rds_enable();
 }
 
 #ifdef CONFIG_SYSFS
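
The CONFIG_SYSFS block that starts here is what reports mitigation state to
userspace; on kernels carrying this series the result shows up as a
vulnerabilities file, as in mainline. A small check, assuming that path
exists on the running kernel:

    /* Sketch: print the Speculative Store Bypass status string from sysfs. */
    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
                            "spec_store_bypass", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout); /* e.g. "Not affected" or "Mitigation: ..." */
            fclose(f);
            return 0;
    }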
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 13e40d37246bc47a7b7de2d02ff043fe9e7c6d25..1df878c10dfec4b8ff90f1834f3ee3143e81e16f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -836,6 +836,10 @@ static const struct x86_cpu_id cpu_no_spec_store_bypass[] = {
        { X86_VENDOR_CENTAUR,   5,                                      },
        { X86_VENDOR_INTEL,     5,                                      },
        { X86_VENDOR_NSC,       5,                                      },
+       { X86_VENDOR_AMD,       0x12,                                   },
+       { X86_VENDOR_AMD,       0x11,                                   },
+       { X86_VENDOR_AMD,       0x10,                                   },
+       { X86_VENDOR_AMD,       0xf,                                    },
        { X86_VENDOR_ANY,       4,                                      },
        {}
 };
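
For reference, cpu_no_spec_store_bypass is consumed through x86_match_cpu():
any CPU that does not match an entry gets the bug bit forced on. Condensed
from the mainline pattern in cpu_set_bug_bits(), as a sketch:

    static void cpu_set_bug_bits(struct cpuinfo_x86 *c)
    {
            /*
             * Whitelisted vendors/families are known not to speculate stores
             * past older loads; everyone else is marked as affected.
             */
            if (!x86_match_cpu(cpu_no_spec_store_bypass))
                    setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
    }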