Enable the STUFF_RSB overwrite macro dynamically with the rsb_overwrite_key
static key instead of the X86_FEATURE_STUFF_RSB feature bit, so RSB stuffing
can be toggled at runtime rather than fixed once at boot when alternatives
are patched.
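
For reference, the C-side shape of this pattern is roughly as follows; a
minimal sketch, not part of this patch, where do_rsb_stuffing() is a
hypothetical stand-in for the assembly stuffing sequence:

    #include <linux/jump_label.h>

    /* Hypothetical stand-in for __ASM_STUFF_RSB. */
    static void do_rsb_stuffing(void) { }

    /* Key defaults to false: the test below starts out as a NOP. */
    static DEFINE_STATIC_KEY_FALSE(example_key);

    static void hot_path(void)
    {
            /* The NOP is patched to a jump once the key is enabled. */
            if (static_branch_unlikely(&example_key))
                    do_rsb_stuffing();
    }

    static void select_mitigation(bool need_overwrite)
    {
            if (need_overwrite)
                    static_branch_enable(&example_key);
            else
                    static_branch_disable(&example_key);
    }

The assembly STUFF_RSB macro below open-codes the same patched-jump site
with STATIC_JUMP_IF_TRUE.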
Signed-off-by: William Roche <william.roche@oracle.com>
Co-developed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
(cherry picked from commit 84e09871beb92364bd374d8c3bc3441a8c4be593)
Orabug: 29660924
Signed-off-by: William Roche <william.roche@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Acked-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
Conflicts:
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/spec_ctrl.h
arch/x86/kernel/cpu/bugs.c
UEK4 has cpufeature.h instead of cpufeatures.h.
Include the <linux/jump_label.h> header in spec_ctrl.h to use static keys.
UEK4 has bugs_64.c instead of bugs.c.
Signed-off-by: Brian Maly <brian.maly@oracle.com>
#define X86_FEATURE_SSBD ( 7*32+25) /* Speculative Store Bypass Disable */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+26) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_VMEXIT_RSB_FULL (7*32+27) /* "" Whether to stuff the RSB on VMEXIT. */
-#define X86_FEATURE_STUFF_RSB (7*32+28) /* "" Whether to stuff the RSB (usually dependent on !SMEP) */
#define X86_FEATURE_RETPOLINE ( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
#ifndef _ASM_X86_SPEC_CTRL_H
#define _ASM_X86_SPEC_CTRL_H
+#include <linux/jump_label.h>
#include <linux/stringify.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
9:
.endm
+/*
+ * Overwrite (stuff) the RSB when the rsb_overwrite_key static key
+ * is enabled. The jump site defaults to off (def=0), so the macro
+ * skips the stuffing sequence until the key is flipped at runtime.
+ */
.macro STUFF_RSB
-ALTERNATIVE __stringify(__ASM_STUFF_RSB), "", X86_FEATURE_STUFF_RSB
+ STATIC_JUMP_IF_TRUE .Lstuff_rsb_\@, rsb_overwrite_key, def=0
+ jmp .Ldone_call_\@
+.Lstuff_rsb_\@:
+ __ASM_STUFF_RSB
+.Ldone_call_\@:
.endm
#else /* __ASSEMBLY__ */
extern void unprotected_firmware_end(void);
DECLARE_STATIC_KEY_FALSE(retpoline_enabled_key);
+DECLARE_STATIC_KEY_FALSE(rsb_overwrite_key);
+
+static inline void rsb_overwrite_enable(void)
+{
+ static_branch_enable(&rsb_overwrite_key);
+}
+
+static inline void rsb_overwrite_disable(void)
+{
+ static_branch_disable(&rsb_overwrite_key);
+}
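
Callers toggle RSB stuffing at runtime through these wrappers rather than
forcing a CPU feature bit; a minimal usage sketch, where
example_update_rsb_overwrite() is hypothetical and mirrors the real call
site in the bugs.c hunk below:

    #include <asm/spec_ctrl.h>

    /* Hypothetical helper: RSB overwrite is only needed without SMEP. */
    static void example_update_rsb_overwrite(bool have_smep)
    {
            if (have_smep)
                    rsb_overwrite_disable();
            else
                    rsb_overwrite_enable();
    }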
#define ibrs_firmware (use_ibrs & SPEC_CTRL_IBRS_FIRMWARE)
#define ibrs_supported (use_ibrs & SPEC_CTRL_IBRS_SUPPORTED)
DEFINE_STATIC_KEY_FALSE(retpoline_enabled_key);
EXPORT_SYMBOL(retpoline_enabled_key);
+/*
+ * Static key that activates the STUFF_RSB overwrite macro at runtime.
+ */
+DEFINE_STATIC_KEY_FALSE(rsb_overwrite_key);
+EXPORT_SYMBOL(rsb_overwrite_key);
+
static bool is_skylake_era(void);
static void disable_ibrs_and_friends(bool);
static void activate_spectre_v2_mitigation(enum spectre_v2_mitigation);
if (boot_cpu_has(X86_FEATURE_SMEP))
return;
- setup_force_cpu_cap(X86_FEATURE_STUFF_RSB);
+ /* IBRS without SMEP needs RSB overwrite */
+ rsb_overwrite_enable();
if (*mode == SPECTRE_V2_IBRS_ENHANCED)
pr_warn("Enhanced IBRS might not provide full mitigation against Spectre v2 if SMEP is not available.\n");