  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-void write_spec_ctrl_current(u64 val)
+void write_spec_ctrl_current(u64 val, bool force)
 {
        if (this_cpu_read(x86_spec_ctrl_current) == val)
                return;
 
        this_cpu_write(x86_spec_ctrl_current, val);
-       wrmsrl(MSR_IA32_SPEC_CTRL, val);
+
+       /*
+        * When KERNEL_IBRS is enabled, this MSR is written on return to
+        * user; unless forced, the update can be deferred until then.
+        */
+       if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+               wrmsrl(MSR_IA32_SPEC_CTRL, val);
 }
 
        if (spectre_v2_in_eibrs_mode(mode)) {
                /* Force it so VMEXIT will restore correctly */
                x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-               write_spec_ctrl_current(x86_spec_ctrl_base);
+               write_spec_ctrl_current(x86_spec_ctrl_base, true);
        }
 
        switch (mode) {
 
 static void update_stibp_msr(void * __unused)
 {
-       write_spec_ctrl_current(x86_spec_ctrl_base);
+       write_spec_ctrl_current(x86_spec_ctrl_base, true);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
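
That context comment heads update_stibp_strict(), the caller that
dispatches update_stibp_msr() to every CPU; the write must be forced
because the target CPUs are executing in the kernel and would otherwise
defer the update until their next return to user. Condensed from the
same file:

	static void update_stibp_strict(void)
	{
		u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

		if (sched_smt_active())
			mask |= SPEC_CTRL_STIBP;

		if (mask == x86_spec_ctrl_base)
			return;

		x86_spec_ctrl_base = mask;
		/* Run update_stibp_msr() on each CPU and wait. */
		on_each_cpu(update_stibp_msr, NULL, 1);
	}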
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-                       write_spec_ctrl_current(x86_spec_ctrl_base);
+                       write_spec_ctrl_current(x86_spec_ctrl_base, true);
                }
        }
 
 void x86_spec_ctrl_setup_ap(void)
 {
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-               write_spec_ctrl_current(x86_spec_ctrl_base);
+               write_spec_ctrl_current(x86_spec_ctrl_base, true);
 
        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
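
With the boot- and IPI-time callers above all forced, the only
non-forced user left is the __speculation_ctrl_update() path named in
the top comment; under KERNEL_IBRS its MSR write collapses to a per-CPU
variable update, and the hardware picks the value up on the next return
to user. The corresponding hunk in process.c (not shown in this
excerpt) is roughly:

 	if (updmsr)
-		write_spec_ctrl_current(msr);
+		write_spec_ctrl_current(msr, false);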