x86/speculation: x86_spec_ctrl_set needs to be called unconditionally
author Boris Ostrovsky <boris.ostrovsky@oracle.com>
Wed, 21 Nov 2018 21:15:27 +0000 (16:15 -0500)
committer Brian Maly <brian.maly@oracle.com>
Tue, 27 Nov 2018 18:09:02 +0000 (13:09 -0500)
Because on entering idle we want to clear the SSBD bit as well,
testing for ibrs_inuse is not sufficient.

We should also clear the SSBD bit in x86_spec_ctrl_base during
initialization, since it is up to the kernel to manage it.

Orabug: 28814570

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/asm/mwait.h
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c

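For reference, below is a minimal standalone sketch of the control flow this patch leaves behind in x86_spec_ctrl_set(): idle entry now always writes the base value with SSBD cleared, idle exit restores the per-CPU privileged value, and a new early return skips the MSR write when the two values are equal. This is a userspace model, not the kernel code: wrmsrl() is stubbed with a printf, the per-CPU variable is modeled as a plain global, and the values assigned in main() are illustrative only.

/*
 * Standalone model of the patched x86_spec_ctrl_set() logic.
 * Not kernel code: wrmsrl() is a stub and x86_spec_ctrl_priv_cpu
 * is a plain global standing in for the per-CPU variable.
 */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS  (1ULL << 0)   /* MSR_IA32_SPEC_CTRL bit 0 */
#define SPEC_CTRL_SSBD  (1ULL << 2)   /* MSR_IA32_SPEC_CTRL bit 2 */

enum spec_ctrl_set_context {
	SPEC_CTRL_INITIAL,
	SPEC_CTRL_IDLE_ENTER,
	SPEC_CTRL_IDLE_EXIT,
};

static uint64_t x86_spec_ctrl_base;      /* IBRS/SSBD cleared at boot */
static uint64_t x86_spec_ctrl_priv_cpu;  /* value used while not idle */

static void wrmsrl(const char *msr, uint64_t val)
{
	printf("%s <- 0x%llx\n", msr, (unsigned long long)val);
}

static void x86_spec_ctrl_set(enum spec_ctrl_set_context context)
{
	uint64_t host;

	/* Nothing to switch if the idle and non-idle values are identical. */
	if (context != SPEC_CTRL_INITIAL &&
	    x86_spec_ctrl_priv_cpu == x86_spec_ctrl_base)
		return;

	switch (context) {
	case SPEC_CTRL_INITIAL:
		host = x86_spec_ctrl_base;
		break;
	case SPEC_CTRL_IDLE_ENTER:
		/* Drop SSBD (IBRS is never in the base value) while idle. */
		host = x86_spec_ctrl_base & ~SPEC_CTRL_SSBD;
		break;
	case SPEC_CTRL_IDLE_EXIT:
		host = x86_spec_ctrl_priv_cpu;
		break;
	default:
		return;
	}

	wrmsrl("MSR_IA32_SPEC_CTRL", host);
}

int main(void)
{
	/* Illustrative scenario: IBRS and SSBD in use while running tasks. */
	x86_spec_ctrl_priv_cpu = SPEC_CTRL_IBRS | SPEC_CTRL_SSBD;

	x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);  /* writes 0x0 */
	x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);   /* writes 0x5 */
	return 0;
}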
index f357e5fe3e427102bb4e15d7b0cd6c9b375debe1..fae52621fff1115adbefaacf80d15bc404a49ef7 100644 (file)
@@ -60,15 +60,13 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
                        mb();
                }
 
-               if (ibrs_inuse)
-                       x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);
+               x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                if (!need_resched())
                        __mwait(eax, ecx);
 
-               if (ibrs_inuse)
-                       x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
+               x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
        }
        current_clr_polling();
 }
index 6827e4fb105c64d832d42efdfb7b75b1ca77bbd8..209cf691efed5e8f74162eae4539c97f5bc5dbe1 100644 (file)
@@ -184,9 +184,10 @@ void __init check_bugs(void)
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-               if (x86_spec_ctrl_base & SPEC_CTRL_IBRS) {
-                       pr_warn("SPEC CTRL MSR (0x%16llx) has IBRS set during boot, clearing it.", x86_spec_ctrl_base);
-                       x86_spec_ctrl_base &= ~(SPEC_CTRL_IBRS);
+               if (x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_SSBD)) {
+                       pr_warn("SPEC CTRL MSR (0x%16llx) has IBRS and/or "
+                               "SSBD set during boot, clearing it.", x86_spec_ctrl_base);
+                       x86_spec_ctrl_base &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_SSBD);
                }
                x86_spec_ctrl_priv = x86_spec_ctrl_base;
                update_cpu_spec_ctrl_all();
@@ -271,6 +272,10 @@ void x86_spec_ctrl_set(enum spec_ctrl_set_context context)
 {
        u64 host;
 
+       if (context != SPEC_CTRL_INITIAL &&
+           this_cpu_read(x86_spec_ctrl_priv_cpu) == x86_spec_ctrl_base)
+               return;
+
        switch (context) {
        case SPEC_CTRL_INITIAL:
                /*
@@ -283,32 +288,27 @@ void x86_spec_ctrl_set(enum spec_ctrl_set_context context)
                break;
        case SPEC_CTRL_IDLE_ENTER:
                /*
-                * If IBRS is in use, disable it to avoid performance impact
-                * during idle.  Same idea for SSBD.
-                *
-                * The SSBD bit remains set if forced to be always on with
-                * spec_store_bypass_disable=on; otherwise, the only time it
-                * can be set, and so require unsetting, is =userspace.
+                * If IBRS/SSBD are in use, disable them to avoid performance impact
+                * during idle.
                 */
-               host = x86_spec_ctrl_base;
-               if (ssbd_userspace_selected())
-                       host &= ~SPEC_CTRL_SSBD;
+               host = x86_spec_ctrl_base & ~SPEC_CTRL_SSBD;
                break;
        case SPEC_CTRL_IDLE_EXIT:
-               /*
-                * Privileged bits meaningful only when IBRS is in use, in
-                * which case it is enabled now.
-                */
-               if (ibrs_inuse)
-                       host = this_cpu_read(x86_spec_ctrl_priv_cpu);
-               else
-                       host = x86_spec_ctrl_base;
+               host = this_cpu_read(x86_spec_ctrl_priv_cpu);
                break;
        default:
                WARN_ONCE(1, "unknown spec_ctrl_set_context %#x\n", context);
                return;
        }
 
+       /*
+        * Note that when MSR_IA32_SPEC_CTRL is not available, both
+        * per_cpu(x86_spec_ctrl_priv_cpu) and x86_spec_ctrl_base
+        * are zero, so we don't need to explicitly check for
+        * MSR presence.
+        * For SPEC_CTRL_INITIAL we are only called when we know
+        * the MSR exists.
+        */
        wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
@@ -956,12 +956,8 @@ static void __init ssb_init(void)
 
                        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
                                x86_spec_ctrl_set(SPEC_CTRL_INITIAL);
-                               if (spectre_v2_enabled == SPECTRE_V2_IBRS) {
-                                       x86_spec_ctrl_priv |= SPEC_CTRL_SSBD;
-                               }
+                               x86_spec_ctrl_priv |= SPEC_CTRL_SSBD;
                        }
-                       else
-                               x86_spec_ctrl_priv &= ~(SPEC_CTRL_SSBD);
 
                        update_cpu_spec_ctrl_all();
                        break;
index 5c73161d3da7a4b69aa8a35636f851aaea91e082..c6d19f35bb57ed9025cfd4b827fe390957dd5833 100644 (file)
@@ -612,17 +612,14 @@ static __cpuidle void mwait_idle(void)
                        smp_mb(); /* quirk */
                }
 
-               if (ibrs_inuse)
-                       x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);
+               x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                if (!need_resched()) {
                        __sti_mwait(0, 0);
-                       if (ibrs_inuse)
-                               x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
+                       x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
                } else {
-                       if (ibrs_inuse)
-                               x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
+                       x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
                        local_irq_enable();
                }
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
index 5eb250be895e2a32addd3f092148d7c79884d230..20b0f575da96e058d6f703b027f02e89bf1d3195 100644 (file)
@@ -1555,15 +1555,13 @@ void native_play_dead(void)
        play_dead_common();
        tboot_shutdown(TB_SHUTDOWN_WFS);
 
-       if (ibrs_inuse)
-               x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);
+       x86_spec_ctrl_set(SPEC_CTRL_IDLE_ENTER);
 
        mwait_play_dead();      /* Only returns on failure */
        if (cpuidle_play_dead())
                hlt_play_dead();
 
-       if (ibrs_inuse)
-               x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
+       x86_spec_ctrl_set(SPEC_CTRL_IDLE_EXIT);
 }
 
 #else /* ... !CONFIG_HOTPLUG_CPU */