x86/process: Allow runtime control of Speculative Store Bypass
author Thomas Gleixner <tglx@linutronix.de>
Sun, 29 Apr 2018 13:21:42 +0000 (15:21 +0200)
committer Brian Maly <brian.maly@oracle.com>
Mon, 21 May 2018 22:04:24 +0000 (18:04 -0400)
The Speculative Store Bypass vulnerability can be mitigated with the
Reduced Data Speculation (RDS) feature. To allow finer-grained control of
this potentially expensive mitigation, a per-task mitigation control is
required.

Add a new TIF_RDS flag and put it into the group of TIF flags which are
evaluated for mismatch in switch_to(). If these bits differ between the
previous and the next task, then the slow-path function __switch_to_xtra()
is invoked. Implement the TIF_RDS-dependent mitigation control in the
slow path.
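
In condensed form the mechanism looks like this (a sketch only; all
names are taken from the hunks below):

    /* thread_info.h: a TIF_RDS mismatch now routes through the slow path */
    #define _TIF_WORK_CTXSW \
            (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)

    /* process.c, __switch_to_xtra(): write the MSR only on a mismatch */
    if (test_tsk_thread_flag(prev_p, TIF_RDS) ^
        test_tsk_thread_flag(next_p, TIF_RDS))
            __speculative_store_bypass_update(task_thread_info(next_p)->flags);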

If the prctl for controlling Speculative Store Bypass is disabled, or if
no task uses the prctl, then there is no overhead in the switch_to() fast
path.
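
The prctl interface itself arrives in a later patch of this series. Purely
as an illustration (the function name below is hypothetical; only TIF_RDS
and speculative_store_bypass_update() come from this patch), a per-task
opt-in would look roughly like:

    /* Hypothetical sketch only -- not part of this diff. */
    static int ssb_prctl_set_sketch(struct task_struct *task, bool ssb_disable)
    {
            if (ssb_disable)
                    set_tsk_thread_flag(task, TIF_RDS);
            else
                    clear_tsk_thread_flag(task, TIF_RDS);

            /* Flags changed for the running task: update the MSR now. */
            if (task == current)
                    speculative_store_bypass_update();
            return 0;
    }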

Update the KVM-related speculation control functions to take TIF_RDS into
account as well.
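
The expected pairing (the call sites shown here are illustrative; the KVM
side is not part of this diff) is to bracket guest execution:

    u64 guest_spec_ctrl = ...;  /* the guest's IA32_SPEC_CTRL image */

    x86_spec_ctrl_set_guest(guest_spec_ctrl);     /* before VM entry */
    /* ... enter and run the guest ... */
    x86_spec_ctrl_restore_host(guest_spec_ctrl);  /* after VM exit */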

Based on a patch from Tim Chen. Completely rewritten.

OraBug: 28041771
CVE: CVE-2018-3639

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
(cherry picked from commit 885f82bfbc6fefb6664ea27965c3ab9ac4194b8c)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
 Conflicts:
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/process.c
[u64->u32]

Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/asm/spec-ctrl.h
arch/x86/include/asm/thread_info.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/process.c

diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 3ad64420a06e9d526d287b5c82c39afd590e2278..5d1939eeeb2209600afc7385afd7b2f54b047db9 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_SPECCTRL_H_
 #define _ASM_X86_SPECCTRL_H_
 
+#include <linux/thread_info.h>
 #include <asm/nospec-branch.h>
 
 /*
@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u64);
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_rds_mask;
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
+static inline u64 rds_tif_to_spec_ctrl(u32 tifn)
+{
+       BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
+       return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+}
+
+static inline u64 rds_tif_to_amd_ls_cfg(u32 tifn)
+{
+       return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+}
+
+extern void speculative_store_bypass_update(void);
+
 #endif
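
[ With the values used elsewhere in this patch (TIF_RDS == 5,
  SPEC_CTRL_RDS_SHIFT == 2), rds_tif_to_spec_ctrl() moves the thread flag
  straight into MSR position without a branch:

      tifn & _TIF_RDS == 0x20                    /* bit 5 set */
      0x20 >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT)    /* 0x20 >> 3 */
           == 0x04 == SPEC_CTRL_RDS

  The BUILD_BUG_ON() above only guards that the shift count cannot go
  negative if the bit positions ever change. ]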
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 9dce035b3ee0b6b06eb564fdc6abf5efe23e84dd..abe850f8fc23dc71ac506d878454e5cf15852811 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -92,6 +92,7 @@ struct thread_info {
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
+#define TIF_RDS                        5       /* Reduced data speculation */
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
@@ -116,6 +117,7 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
+#define _TIF_RDS               (1 << TIF_RDS)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
@@ -164,7 +166,7 @@ struct thread_info {
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
-       (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+       (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 0fccb442b4543f0ce3d4a9d6e9366ce9194e816c..5390231ec762ebccf326f436404de3da90d96e3e 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
 #define SPEC_CTRL_FEATURE_DISABLE_IBRS (0 << 0)
 #define SPEC_CTRL_IBRS                 (1 << 0)
 #define SPEC_CTRL_FEATURE_ENABLE_IBRS  (1 << 0)
-#define SPEC_CTRL_RDS                  (1 << 2) /* Reduced Data Speculation */
-
+#define SPEC_CTRL_RDS_SHIFT            2          /* Reduced Data Speculation bit */
+#define SPEC_CTRL_RDS                  (1 << SPEC_CTRL_RDS_SHIFT)   /* Reduced Data Speculation */
 
 #define MSR_IA32_SPEC_CTRL             0x00000048
 #define MSR_IA32_PRED_CMD              0x00000049
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 938a92ceae352c086782e48dbaba3d8102906e07..0f6b6e42a71b452e3db00307da0ee0583468e1e8 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -237,27 +237,51 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
 u64 x86_spec_ctrl_get_default(void)
 {
-       return x86_spec_ctrl_base;
+       u64 msrval = x86_spec_ctrl_base;
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+       return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
 void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 {
+       u64 host = x86_spec_ctrl_base;
+
        if (!ibrs_supported)
                return;
-       if (ibrs_inuse || x86_spec_ctrl_base != guest_spec_ctrl)
+
+       if (ibrs_inuse) {
+               wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+               return;
+       }
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+       if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
 
 void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 {
+       u64 host = x86_spec_ctrl_base;
+
        if (!ibrs_supported)
                return;
-       if (ibrs_inuse)
+
+       if (ibrs_inuse) {
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
-       else if (x86_spec_ctrl_base != guest_spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+               return;
+       }
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+       if (host != guest_spec_ctrl)
+               wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3a88858730332ca3c1e65cc90c8a257df6c4b497..5d1135641bf2665164d6af1bdba352dd277cfc48 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -30,6 +30,7 @@
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 #include <asm/tlbflush.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -229,6 +230,24 @@ int set_tsc_mode(unsigned int val)
        return 0;
 }
 
+static __always_inline void __speculative_store_bypass_update(u32 tifn)
+{
+       u64 msr;
+
+       if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+               msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+               wrmsrl(MSR_AMD64_LS_CFG, msr);
+       } else {
+               msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+               wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+       }
+}
+
+void speculative_store_bypass_update(void)
+{
+       __speculative_store_bypass_update(current_thread_info()->flags);
+}
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
 {
@@ -270,6 +290,11 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
+
+       if (test_tsk_thread_flag(prev_p, TIF_RDS) ^
+           test_tsk_thread_flag(next_p, TIF_RDS))
+               __speculative_store_bypass_update(task_thread_info(next_p)->flags);
+
        propagate_user_return_notify(prev_p, next_p);
 }