From: Mihai Carabas
Date: Mon, 11 Feb 2019 13:04:07 +0000 (+0200)
Subject: x86: cpu: microcode: fix late loading SSBD and L1TF bugs eval
X-Git-Tag: v4.1.12-124.31.3~233
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=3d71c94a6b65a1af82dd08a4cbd53ddc9ea5ff90;p=users%2Fjedix%2Flinux-maple.git

x86: cpu: microcode: fix late loading SSBD and L1TF bugs eval

On microcode reloading we have to update the status of the SSBD
mitigation if the relevant CPU feature bits were not present at init
time: in that case we opt for the default mitigation, seccomp or prctl
(i.e. per process).

For L1TF we do not have to do anything, as the host mitigation does not
depend on any CPU bits; from the hypervisor's perspective we just call
vmentry_l1d_flush_set() to re-assess the mitigation.

vmentry_l1d_flush_set() is exposed through this structure:

static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};

and the mitigation can be set/get through this sysfs entry:

/sys/module/kvm_intel/parameters/vmentry_l1d_flush

It was not possible to reuse the same functions used at init time, as
most of their logic parses boot_command_line, which is in init data and
is dropped after booting.

Orabug: 29336760

Signed-off-by: Mihai Carabas
Reviewed-by: Boris Ostrovsky
Signed-off-by: Brian Maly
---

diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index c56b49c7a5c0e..6377ce7e17c16 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -350,8 +350,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 	}
 
 	/* SSBD controlled in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SSBD) ||
-	    static_cpu_has(X86_FEATURE_AMD_SSBD))
+	if (boot_cpu_has(X86_FEATURE_SSBD) ||
+	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
 		hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
 	if (hostval != guestval) {
@@ -1313,6 +1313,50 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+/*
+ * This function replicates at runtime what check_bugs() does at init
+ * time. As we will be using the default mitigations everywhere, we have
+ * essentially dropped the logic of parsing boot_command_line, which
+ * lives in init data and is no longer available at this point.
+ */
+void microcode_late_select_mitigation(void)
+{
+	enum spectre_v2_mitigation mode;
+	bool microcode_added_ssbd = false;
+	/*
+	 * In late loading we will use the default mitigation, which is
+	 * seccomp or prctl. We will do this ONLY if these bits were
+	 * not present at init time and were added by microcode late
+	 * loading.
+	 */
+	if (cpu_has(&cpu_data(smp_processor_id()), X86_FEATURE_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_SSBD)) {
+		setup_force_cpu_cap(X86_FEATURE_SSBD);
+		microcode_added_ssbd = true;
+	}
+	if (cpu_has(&cpu_data(smp_processor_id()), X86_FEATURE_AMD_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+		microcode_added_ssbd = true;
+	}
+
+	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) {
+		if (microcode_added_ssbd) {
+			if (IS_ENABLED(CONFIG_SECCOMP))
+				ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
+			else
+				ssb_mode = SPEC_STORE_BYPASS_PRCTL;
+		}
+#undef pr_fmt
+#define pr_fmt(fmt)	"Speculative Store Bypass late loading: " fmt
+		pr_info("%s\n", ssb_strings[ssb_mode]);
+
+	} else {
+		ssb_mode = SPEC_STORE_BYPASS_NONE;
+	}
+}
+
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index a6e07caf4cff0..b81dadfd22149 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -119,6 +119,8 @@ static DEFINE_MUTEX(microcode_mutex);
 struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 EXPORT_SYMBOL_GPL(ucode_cpu_info);
 
+extern void microcode_late_select_mitigation(void);
+
 /*
  * Operations that are run on a target cpu:
  */
@@ -271,6 +273,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
 	if (ret > 0) {
 		perf_check_microcode();
 		microcode_late_eval_cpuid_all();
+		microcode_late_select_mitigation();
 	}
 
 	mutex_unlock(&microcode_mutex);
@@ -391,6 +394,7 @@ static ssize_t reload_store(struct device *dev,
 	if (!ret) {
 		perf_check_microcode();
 		microcode_late_eval_cpuid_all();
+		microcode_late_select_mitigation();
 	}
 
 	mutex_unlock(&microcode_mutex);