x86/bugs: Rework spec_ctrl base and mask logic
author    Thomas Gleixner <tglx@linutronix.de>
          Sat, 12 May 2018 18:10:00 +0000 (20:10 +0200)
committer Brian Maly <brian.maly@oracle.com>
          Mon, 4 Jun 2018 17:35:30 +0000 (13:35 -0400)
x86_spec_ctrl_mask is intended to mask out bits from an MSR_SPEC_CTRL
value which are not to be modified. However, the implementation is not
really used and the bitmask was inverted to make a check easier, which
was removed in "x86/bugs: Remove x86_spec_ctrl_set()".

Aside from that, it is missing the STIBP bit if it is supported by the
platform, so if the mask were used in x86_virt_spec_ctrl() it would
prevent a guest from setting STIBP.

Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to
sanitize the value which is supplied by the guest.
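
For illustration only (not part of the patch), the sanitization boils
down to keeping the host's non-modifiable bits and taking only the
permitted bits from the guest value. A minimal stand-alone user-space
sketch follows; the bit values match the msr-index.h definitions in
this patch, the host and guest values are made up:

    /* Sketch of the guest-value sanitization done in x86_virt_spec_ctrl();
     * illustrative values only.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS  (1ULL << 0)
    #define SPEC_CTRL_STIBP (1ULL << 1)
    #define SPEC_CTRL_SSBD  (1ULL << 2)

    int main(void)
    {
            /* bits the guest is allowed to modify */
            uint64_t mask    = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
            uint64_t hostval = SPEC_CTRL_IBRS;                 /* host base value   */
            uint64_t guest   = SPEC_CTRL_STIBP | (1ULL << 5);  /* bit 5 unsupported */

            uint64_t guestval = (hostval & ~mask) | (guest & mask);

            printf("guestval = 0x%llx\n", (unsigned long long)guestval); /* 0x2 */
            return 0;
    }

Note how the unsupported bit 5 requested by the guest is dropped, while
the STIBP request survives because STIBP is in the modifiable mask.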

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Orabug: 28063992
CVE: CVE-2018-3639

(cherry picked from commit be6fcb54)
Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
Conflicts:
arch/x86/kernel/cpu/bugs.c

[Konrad:
 We already carry IBRS support, which makes this backport twice as
 hard.
 The first part of this patch is to invert the mask, no biggie.

 But the mask for IBRS mode (that is, we want to set the SPEC_CTRL MSR
 to 1<<0 in kernel space, but in user space we want it to be 1<<1)
 didn't set the SSBD bit, as we should not set SSBD in kernel mode.
 With the inversion that is OK.

 The next part is the two values: x86_spec_ctrl_base and
 x86_spec_ctrl_priv.

 x86_spec_ctrl_base is what user space is going to run with (so
 tack on SSBD), and x86_spec_ctrl_priv is what the kernel runs with
 (so tack on IBRS, but _NOT_ SSBD).

 That means the whole logic of filtering the supported SPEC_CTRL
 value depending on what the host supports should be seeded
 with x86_spec_ctrl_priv.

 With all that the logic works: we AND the host value (seeded from the
 boot-time value of the MSR and what we support) with the inverse of
 our mask to keep the non-modifiable bits, and then OR in what the
 guest wants ANDed with our mask (see the sketch after this note).

 All the while supporting any other bits in the SPEC_CTRL that
 may come in the future.

 And this logic is fine on AMD too, where the SSBD bit does not show
 up in the SPEC_CTRL mask.

P.S.
To make it more fun, the x86_spec_ctrl_priv |= IBRS update is done in
a header (see set_ibrs_inuse).]
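
As referenced in the note above, here is a minimal sketch of the
base/priv split and of seeding the guest filtering with the privileged
value. It is illustrative only: the helper names and the simplified
boot flow are made up, not the real bugs_64.c code.

    /* Illustrative sketch of the base/priv/mask relationship described above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define SPEC_CTRL_IBRS   (1ULL << 0)
    #define SPEC_CTRL_STIBP  (1ULL << 1)
    #define SPEC_CTRL_SSBD   (1ULL << 2)

    static uint64_t spec_ctrl_base;                    /* user space: SSBD added  */
    static uint64_t spec_ctrl_priv;                    /* kernel:     IBRS added  */
    static uint64_t spec_ctrl_mask = SPEC_CTRL_IBRS;   /* guest-modifiable bits   */

    static void boot_setup(bool has_stibp, bool intel_ssbd)
    {
            spec_ctrl_priv  = spec_ctrl_base | SPEC_CTRL_IBRS; /* kernel runs with IBRS */
            spec_ctrl_base |= SPEC_CTRL_SSBD;                  /* user space gets SSBD  */
            if (has_stibp)
                    spec_ctrl_mask |= SPEC_CTRL_STIBP;
            if (intel_ssbd)
                    spec_ctrl_mask |= SPEC_CTRL_SSBD;
    }

    /* Filtering of a guest-supplied value, seeded with the privileged
     * value when IBRS is in use, as the note describes.
     */
    static uint64_t sanitize_guest(uint64_t guest_spec_ctrl, bool ibrs_inuse)
    {
            uint64_t hostval = ibrs_inuse ? spec_ctrl_priv : spec_ctrl_base;

            return (hostval & ~spec_ctrl_mask) |
                   (guest_spec_ctrl & spec_ctrl_mask);
    }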

Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/cpu/bugs_64.c

diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index dfc68ab361a2981bfa1fa725becefba6f44c87b6..3569972e3fc4d4c6e5ce9eafd446359eac6a81cb 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
 #define SPEC_CTRL_FEATURE_DISABLE_IBRS (0 << 0)
 #define SPEC_CTRL_IBRS                 (1 << 0)
 #define SPEC_CTRL_FEATURE_ENABLE_IBRS  (1 << 0)
+#define SPEC_CTRL_STIBP                        (1 << 1)
 #define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
 #define SPEC_CTRL_SSBD                 (1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
 
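
For orientation (not part of the patch): with these definitions, the
fully populated modifiable-bits mask built below - IBRS at boot, STIBP
added in check_bugs() when supported, SSBD added on the Intel path of
__ssb_select_mitigation() - works out to

    SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD = 0x1 | 0x2 | 0x4 = 0x7
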
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 75138e7e623630ce3133195a00ecfebaf3444001..b6781676d845fec62c09ebbdff3314096e65d561 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  * The vendor and possibly platform specific bits which can be modified in
  * x86_spec_ctrl_base.
  */
-static u64 x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 
 /*
  * Our knob on entering the kernel to enable and disable IBRS.
@@ -161,6 +161,10 @@ void __init check_bugs(void)
                x86_spec_ctrl_priv = x86_spec_ctrl_base;
        }
 
+       /* Allow STIBP in MSR_SPEC_CTRL if supported */
+       if (boot_cpu_has(X86_FEATURE_STIBP))
+               x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();
 
@@ -231,7 +235,7 @@ void x86_spec_ctrl_set(u64 val)
 {
        u64 host;
 
-       if (val & x86_spec_ctrl_mask)
+       if (val & ~x86_spec_ctrl_mask)
                WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
        else {
                /*
@@ -257,22 +261,44 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
+       u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();
-       u64 msr, host = x86_spec_ctrl_base;
 
        if (ibrs_supported) {
+               /*
+                * Restrict guest_spec_ctrl to supported values. Clear the
+                * modifiable bits in the host base value and or the
+                * modifiable bits from the guest value.
+                */
+               if (ibrs_inuse)
+                       /*
+                        * Except on IBRS we don't want to use host base value
+                        * but rather the privilege value which has IBRS set.
+                        */
+                       hostval = x86_spec_ctrl_priv;
+
+               guestval = hostval & ~x86_spec_ctrl_mask;
+               guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
                if (ibrs_inuse) {
-                       msr = setguest ? guest_spec_ctrl : x86_spec_ctrl_priv;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+                       /* You may wonder why we don't just jump to the
+                        * 'if (hostval != guestval)' conditional to save an MSR
+                        * write (say the guest MSR value is IBRS and hostval is
+                        * that too) - the reason is that on some platforms the
+                        * SPEC_CTRL MSR is like a reset button, not latched.
+                        */
+                       msrval = setguest ? guestval : hostval;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                        return;
                }
+
                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SSBD))
-                       host |= ssbd_tif_to_spec_ctrl(ti->flags);
+                       hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
-               if (host != guest_spec_ctrl) {
-                       msr = setguest ? guest_spec_ctrl : host;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+               if (hostval != guestval) {
+                       msrval = setguest ? guestval : hostval;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }
 }
@@ -766,10 +792,11 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-                       if (mode == SPEC_STORE_BYPASS_DISABLE) {
-                               x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+                       x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+
+                       if (mode == SPEC_STORE_BYPASS_DISABLE)
                                x86_spec_ctrl_set(SPEC_CTRL_SSBD);
-                       } else
+                       else
                                x86_spec_ctrl_priv &= ~(SPEC_CTRL_SSBD);
                        break;
                case X86_VENDOR_AMD:
@@ -882,7 +909,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
        if (boot_cpu_has(X86_FEATURE_IBRS) && ssb_mode != SPEC_STORE_BYPASS_USERSPACE)
-               x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+               x86_spec_ctrl_set(x86_spec_ctrl_base & x86_spec_ctrl_mask);
 
        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssbd_enable();
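
For context (not part of this commit), x86_virt_spec_ctrl() is meant to
be driven from the hypervisor's guest entry/exit path. A sketch of that
calling pattern follows; the wrapper names mirror the upstream
<asm/spec-ctrl.h> helpers and are an assumption for this tree, not
verified against it.

    /* Sketch of bracketing a guest run with x86_virt_spec_ctrl(). */
    #include <linux/types.h>

    void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl,
                            bool setguest);              /* from bugs_64.c above */

    static inline void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
                                               u64 guest_virt_spec_ctrl)
    {
            x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
    }

    static inline void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
                                                  u64 guest_virt_spec_ctrl)
    {
            x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
    }

    /* In the vcpu run loop (hypothetical local names):
     *
     *      x86_spec_ctrl_set_guest(guest_spec_ctrl, guest_virt_spec_ctrl);
     *      ... run the guest ...
     *      x86_spec_ctrl_restore_host(guest_spec_ctrl, guest_virt_spec_ctrl);
     */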