A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, the host needs a value of
zero.
But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
the other bits as reserved, so the kernel should respect the boot-time
SPEC_CTRL value and use that.
This allows us to deal with future extensions to the SPEC_CTRL interface,
if there are any at all.
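In rough outline, the change makes guest entry/exit handle SPEC_CTRL like
this (a sketch only; the real helpers are x86_spec_ctrl_set_guest() and
x86_spec_ctrl_restore_host() in the diff below):

    /* Sketch: use the guest's value around guest entry and go back to the
     * host's boot-time value (x86_spec_ctrl_base), not a hard-coded zero. */
    wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);      /* before VMENTER */
    /* ... guest runs ... */
    rdmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);      /* capture guest view */
    wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);   /* restore host view */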
Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.
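For illustration only (a simplified assumption, not the kernel's exact
definition), the native variant boils down to the bare instruction, which is
also what the paravirt call site gets patched into on bare metal:

    /* Simplified sketch of native_wrmsrl(): ECX = MSR, EAX/EDX = low/high. */
    static inline void sketch_native_wrmsrl(unsigned int msr, u64 val)
    {
            asm volatile("wrmsr"
                         : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
    }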
OraBug: 28041771
CVE: CVE-2018-3639
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 5cf687548705412da47c9cec342fd952d71ed3d5)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Conflicts:
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
[We need to preserve the check for ibrs_inuse, which we can now do inside
the functions]
Signed-off-by: Brian Maly <brian.maly@oracle.com>
extern void x86_spec_ctrl_set(u64);
extern u64 x86_spec_ctrl_get_default(void);
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+        if (!ibrs_supported)
+                return;
+        if (ibrs_inuse || x86_spec_ctrl_base != guest_spec_ctrl)
+                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+        if (!ibrs_supported)
+                return;
+        if (ibrs_inuse)
+                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
+        else if (x86_spec_ctrl_base != guest_spec_ctrl)
+                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
/*
* Disable retpoline and attempt to fall back to another Spectre v2 mitigation.
* If possible, fall back to IBRS and IBPB.
local_irq_enable();
-        if (ibrs_supported) {
-                if (ibrs_inuse || svm->spec_ctrl || x86_spec_ctrl_base)
-                        wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
-        }
+        x86_spec_ctrl_set_guest(svm->spec_ctrl);
asm volatile (
"push %%" _ASM_BP "; \n\t"
"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
if (ibrs_supported) {
rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
-                if (ibrs_inuse)
-                        native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
-                else if (svm->spec_ctrl)
-                        native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+                x86_spec_ctrl_restore_host(svm->spec_ctrl);
}
/* Eliminate branch target predictions from guest mode */
vmx->__launched = vmx->loaded_vmcs->launched;
-        if (ibrs_supported) {
-                if (ibrs_inuse || vmx->spec_ctrl || x86_spec_ctrl_base)
-                        wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-        }
+        x86_spec_ctrl_set_guest(vmx->spec_ctrl);
asm(
/* Store host registers */
if (ibrs_supported) {
rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-                if (ibrs_inuse)
-                        native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
-                else if (vmx->spec_ctrl)
-                        native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+                x86_spec_ctrl_restore_host(vmx->spec_ctrl);
}
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */