x86: cpu: microcode: fix late loading SSBD and L1TF bugs eval
author     Mihai Carabas <mihai.carabas@oracle.com>
           Mon, 11 Feb 2019 13:04:07 +0000 (15:04 +0200)
committer  Brian Maly <brian.maly@oracle.com>
           Tue, 26 Mar 2019 20:32:54 +0000 (16:32 -0400)
On microcode reloading we have to update the status of the Speculative Store
Bypass (SSBD) mitigation if the feature bits were not present at init time: we
opt for the default mitigation, seccomp or prctl (i.e. per process).

For L1TF we do not have to do anything, as the host mitigation does not depend
on any CPU feature bits; from the hypervisor's perspective we just call
vmentry_l1d_flush_set to re-assess the mitigation. That setter is exposed
through the vmentry_l1d_flush_ops structure:

static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};

The parameter can be set/read through this sysfs entry:
/sys/module/kvm_intel/parameters/vmentry_l1d_flush
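
For illustration, here is a minimal self-contained sketch (not the KVM code; the
module name, parameter name and handlers demo_param/demo_mode/demo_mode_set/get
are made up) of how a module parameter backed by struct kernel_param_ops re-runs
its .set handler on every write to its sysfs entry, which is the mechanism that
lets the mitigation be re-assessed at runtime:

/* demo_param.c - hypothetical example module */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

static int demo_mode;

/* Invoked on every write to /sys/module/demo_param/parameters/demo_mode;
 * this is the hook where a mitigation could be re-evaluated. */
static int demo_mode_set(const char *val, const struct kernel_param *kp)
{
	int ret = kstrtoint(val, 10, &demo_mode);

	if (ret)
		return ret;
	pr_info("demo_mode re-evaluated: %d\n", demo_mode);
	return 0;
}

/* Invoked on every read of the sysfs entry. */
static int demo_mode_get(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%d\n", demo_mode);
}

static const struct kernel_param_ops demo_mode_ops = {
	.set = demo_mode_set,
	.get = demo_mode_get,
};
module_param_cb(demo_mode, &demo_mode_ops, &demo_mode, 0644);

MODULE_LICENSE("GPL");

Writing to /sys/module/kvm_intel/parameters/vmentry_l1d_flush works the same
way: the write ends up in vmentry_l1d_flush_set, which re-assesses the L1D
flush policy against the current CPU/microcode state.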

It was not possible to reuse the init-time functions, because most of their
logic parses boot_command_line, which lives in init data and is dropped after
boot.
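
Late microcode loading itself is triggered through the microcode reload sysfs
file, which lands in reload_store() in microcode/core.c (see the hunk below)
and, with this patch, also calls microcode_late_select_mitigation(). A minimal
userspace sketch, illustrative only, assuming the updated microcode blob has
already been placed under /lib/firmware:

/* reload_ucode.c - hypothetical helper to trigger late microcode loading */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/devices/system/cpu/microcode/reload", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing "1" asks the kernel to load the new microcode on all CPUs. */
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

The same can be done from a shell with:
echo 1 > /sys/devices/system/cpu/microcode/reload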

Orabug: 29336760

Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/cpu/microcode/core.c

index c56b49c7a5c0e60a7bfde459a80cedfd4764772d..6377ce7e17c1674ba0cd3c768c027bb311ebd2a4 100644 (file)
@@ -350,8 +350,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                }
 
                /* SSBD controlled in MSR_SPEC_CTRL */
-               if (static_cpu_has(X86_FEATURE_SSBD) ||
-                   static_cpu_has(X86_FEATURE_AMD_SSBD))
+               if (boot_cpu_has(X86_FEATURE_SSBD) ||
+                   boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
                if (hostval != guestval) {
@@ -1313,6 +1313,50 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+/*
+ * This function replicates at runtime what check_bugs() does at init time.
+ * Since we use the default mitigations everywhere, we have dropped the
+ * boot_command_line parsing logic, which would not be possible here anyway:
+ * the command line lives in init data and is discarded after boot.
+ */
+void microcode_late_select_mitigation(void)
+{
+       enum spectre_v2_mitigation mode;
+       bool microcode_added_ssbd = false;
+       /*
+        * In late loading we will use the default mitigation, which is
+        * seccomp or prctl. We do this ONLY if these bits were not
+        * present at init time and were added by late microcode
+        * loading.
+        */
+       if (cpu_has(&cpu_data(smp_processor_id()), X86_FEATURE_SSBD) &&
+           !static_cpu_has(X86_FEATURE_SSBD)) {
+               setup_force_cpu_cap(X86_FEATURE_SSBD);
+               microcode_added_ssbd = true;
+       }
+       if (cpu_has(&cpu_data(smp_processor_id()), X86_FEATURE_AMD_SSBD) &&
+           !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+               setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+               microcode_added_ssbd = true;
+       }
+
+       if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) {
+               if (microcode_added_ssbd) {
+                       if (IS_ENABLED(CONFIG_SECCOMP))
+                               ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
+                       else
+                               ssb_mode = SPEC_STORE_BYPASS_PRCTL;
+               }
+#undef pr_fmt
+#define pr_fmt(fmt)    "Speculative Store Bypass late loading: " fmt
+               pr_info("%s\n", ssb_strings[ssb_mode]);
+
+       } else {
+               ssb_mode = SPEC_STORE_BYPASS_NONE;
+       }
+}
+
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                              char *buf, unsigned int bug)
 {
index a6e07caf4cff07b14bc024b45ff057f1805961f5..b81dadfd22149ebe82478573d8108b4e3d9f072e 100644 (file)
@@ -119,6 +119,8 @@ static DEFINE_MUTEX(microcode_mutex);
 struct ucode_cpu_info          ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
+extern void microcode_late_select_mitigation(void);
+
 /*
  * Operations that are run on a target cpu:
  */
@@ -271,6 +273,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
        if (ret > 0) {
                perf_check_microcode();
                microcode_late_eval_cpuid_all();
+               microcode_late_select_mitigation();
        }
 
        mutex_unlock(&microcode_mutex);
@@ -391,6 +394,7 @@ static ssize_t reload_store(struct device *dev,
        if (!ret) {
                perf_check_microcode();
                microcode_late_eval_cpuid_all();
+               microcode_late_select_mitigation();
        }
 
        mutex_unlock(&microcode_mutex);