/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
                /sys/devices/system/cpu/vulnerabilities/l1tf
                /sys/devices/system/cpu/vulnerabilities/mds
+               /sys/devices/system/cpu/vulnerabilities/srbds
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
                /sys/devices/system/cpu/vulnerabilities/itlb_multihit
 Date:          January 2018
 
                        the kernel will oops in either "warn" or "fatal"
                        mode.
 
+       srbds=          [X86,INTEL]
+                       Control the Special Register Buffer Data Sampling
+                       (SRBDS) mitigation.
+
+                       Certain CPUs are vulnerable to an MDS-like
+                       exploit which can leak bits from the random
+                       number generator.
+
+                       By default, this issue is mitigated by
+                       microcode.  However, the microcode fix can cause
+                       the RDRAND and RDSEED instructions to become
+                       much slower.  Among other effects, this will
+                       result in reduced throughput from /dev/urandom.
+
+                       The microcode mitigation can be disabled with
+                       the following option:
+
+                       off:    Disable mitigation and remove
+                               performance impact to RDRAND and RDSEED
+
        srcutree.counter_wrap_check [KNL]
                        Specifies how frequently to check for
                        grace-period sequence counter wrap for the
 
 #define X86_FEATURE_AVX512_4FMAPS      (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_FSRM               (18*32+ 4) /* Fast Short Rep Mov */
 #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL         (18*32+ 9) /* "" SRBDS mitigation MSR available */
 #define X86_FEATURE_MD_CLEAR           (18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 #define X86_BUG_TAA                    X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
 
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
+/* SRBDS support */
+#define MSR_IA32_MCU_OPT_CTRL          0x00000123   /* Microcode update option control */
+#define RNGDS_MITG_DIS                 BIT(0)       /* Set to DISABLE the RNG SRBDS mitigation */
+
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 #define MSR_IA32_SYSENTER_EIP          0x00000176
 
 static void __init mds_select_mitigation(void);
 static void __init mds_print_mitigation(void);
 static void __init taa_select_mitigation(void);
+static void __init srbds_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
        l1tf_select_mitigation();
        mds_select_mitigation();
        taa_select_mitigation();
+       srbds_select_mitigation();
 
        /*
         * As MDS and TAA mitigations are inter-related, print MDS
 }
 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "SRBDS: " fmt
+
+/*
+ * Mitigation states for SRBDS; the selected state is reported verbatim
+ * through srbds_strings[] in sysfs and dmesg.
+ */
+enum srbds_mitigations {
+       SRBDS_MITIGATION_OFF,           /* Disabled via srbds=off or mitigations=off */
+       SRBDS_MITIGATION_UCODE_NEEDED,  /* Affected, but SRBDS_CTRL microcode absent */
+       SRBDS_MITIGATION_FULL,          /* Microcode mitigation left enabled (default) */
+       SRBDS_MITIGATION_TSX_OFF,       /* MDS_NO part only exposed via TSX, which is off */
+       SRBDS_MITIGATION_HYPERVISOR,    /* Guest: microcode status not knowable */
+};
+
+/* Chosen at boot by srbds_select_mitigation(); FULL unless overridden. */
+static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
+
+static const char * const srbds_strings[] = {
+       [SRBDS_MITIGATION_OFF]          = "Vulnerable",
+       [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+       [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
+       [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
+       [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
+};
+
+/* Set by the "srbds=off" command line option (srbds_parse_cmdline()). */
+static bool srbds_off;
+
+/*
+ * Program MSR_IA32_MCU_OPT_CTRL on the current CPU to match the chosen
+ * SRBDS mitigation state.  Called once at boot from srbds_select_mitigation()
+ * and again during secondary-CPU setup so all CPUs end up consistent.
+ */
+void update_srbds_msr(void)
+{
+       u64 mcu_ctrl;
+
+       /* Nothing to do on unaffected CPUs. */
+       if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+               return;
+
+       /* In a guest the MSR may not be exposed; leave it alone. */
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return;
+
+       /* Without the SRBDS_CTRL microcode the MSR does not exist. */
+       if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
+               return;
+
+       /* Read-modify-write: preserve unrelated bits in the MSR. */
+       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+       switch (srbds_mitigation) {
+       case SRBDS_MITIGATION_OFF:
+       case SRBDS_MITIGATION_TSX_OFF:
+               /* Turn the microcode mitigation off (restores RDRAND/RDSEED speed). */
+               mcu_ctrl |= RNGDS_MITG_DIS;
+               break;
+       case SRBDS_MITIGATION_FULL:
+               mcu_ctrl &= ~RNGDS_MITG_DIS;
+               break;
+       default:
+               break;
+       }
+
+       wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+}
+
+/*
+ * Decide the SRBDS mitigation state for this boot, apply it to the boot
+ * CPU via update_srbds_msr(), and log the result.  Checks are ordered:
+ * TSX-off exemption, then hypervisor, then microcode availability, and
+ * only then the user's opt-out — so "srbds=off" cannot mask a more
+ * informative state.
+ */
+static void __init srbds_select_mitigation(void)
+{
+       u64 ia32_cap;
+
+       if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+               return;
+
+       /*
+        * Check to see if this is one of the MDS_NO systems supporting
+        * TSX that are only exposed to SRBDS when TSX is enabled.
+        */
+       ia32_cap = x86_read_arch_cap_msr();
+       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+               srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+       else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
+       else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
+               srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
+       else if (cpu_mitigations_off() || srbds_off)
+               srbds_mitigation = SRBDS_MITIGATION_OFF;
+
+       update_srbds_msr();
+       pr_info("%s\n", srbds_strings[srbds_mitigation]);
+}
+
+/*
+ * Parse the "srbds=" kernel command line option.  Only "off" is
+ * recognized; any other value leaves the default (FULL) mitigation.
+ * Ignored entirely on CPUs not affected by SRBDS.
+ */
+static int __init srbds_parse_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+               return 0;
+
+       srbds_off = !strcmp(str, "off");
+       return 0;
+}
+early_param("srbds", srbds_parse_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
        return "";
 }
 
+/* Format the current SRBDS mitigation state for the sysfs "srbds" file. */
+static ssize_t srbds_show_state(char *buf)
+{
+       return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
        case X86_BUG_ITLB_MULTIHIT:
                return itlb_multihit_show_state(buf);
 
+       case X86_BUG_SRBDS:
+               return srbds_show_state(buf);
+
        default:
                break;
        }
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
 }
+
+/* x86 handler for /sys/devices/system/cpu/vulnerabilities/srbds. */
+ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+}
 #endif
 
        {}
 };
 
+/*
+ * Match an Intel family-6 model over a range of steppings; 'issues' is a
+ * bitmask (below) of the vulnerabilities that apply to those steppings.
+ */
+#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                  \
+       X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
+                                           INTEL_FAM6_##model, steppings, \
+                                           X86_FEATURE_ANY, issues)
+
+/* Bits for the 'issues' mask above. */
+#define SRBDS          BIT(0)
+
+/*
+ * CPUs known to be affected, consulted via cpu_matches().  Kaby Lake is
+ * capped at specific steppings — presumably later steppings are fixed in
+ * hardware; confirm against Intel's affected-processor list when updating.
+ */
+static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+       VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL,         X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL_L,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL_G,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x0, 0xC),        SRBDS),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x0, 0xD),        SRBDS),
+       {}
+};
+
+
 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
 {
        const struct x86_cpu_id *m = x86_match_cpu(table);
             (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
                setup_force_cpu_bug(X86_BUG_TAA);
 
+       /*
+        * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
+        * in the vulnerability blacklist.  CPUs with neither instruction
+        * have no exposed RNG state to leak, so they are left unflagged.
+        */
+       if ((cpu_has(c, X86_FEATURE_RDRAND) ||
+            cpu_has(c, X86_FEATURE_RDSEED)) &&
+           cpu_matches(cpu_vuln_blacklist, SRBDS))
+               setup_force_cpu_bug(X86_BUG_SRBDS);
+
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
 
        mtrr_ap_init();
        validate_apic_and_package_id(c);
        x86_spec_ctrl_setup_ap();
+       update_srbds_msr();
 }
 
 static __init int setup_noclflush(char *arg)
 
 unsigned int aperfmperf_get_khz(int cpu);
 
 extern void x86_spec_ctrl_setup_ap(void);
+extern void update_srbds_msr(void);
 
 extern u64 x86_read_arch_cap_msr(void);
 
 
        return sprintf(buf, "Not affected\n");
 }
 
+/*
+ * Weak default for architectures that do not implement cpu_show_srbds();
+ * overridden by the arch-specific handler where the bug is detectable.
+ */
+ssize_t __weak cpu_show_srbds(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
 static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
 static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_mds.attr,
        &dev_attr_tsx_async_abort.attr,
        &dev_attr_itlb_multihit.attr,
+       &dev_attr_srbds.attr,
        NULL
 };