]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
x86/bugs, KVM: Support the combination of guest and host IBRS
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Thu, 26 Apr 2018 02:04:19 +0000 (22:04 -0400)
committerBrian Maly <brian.maly@oracle.com>
Mon, 21 May 2018 22:02:42 +0000 (18:02 -0400)
A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, this means a value of zero is
what is needed in the host.

But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
the other bits as reserved so the kernel should respect the boot time
SPEC_CTRL value and use that.

This allows us to deal with future extensions to the SPEC_CTRL interface,
if there are any at all.

Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.

OraBug: 28041771
CVE: CVE-2018-3639

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 5cf687548705412da47c9cec342fd952d71ed3d5)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
 Conflicts:
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
[We need to preserve the check for ibrs_inuse - which we can do now in the
     functions]

Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c

index abc0e887e06466bbbbacfd270224a4fbcefc6250..d2997f2b96936dd4ee9605e44a8f7db7f4721541 100644 (file)
@@ -183,6 +183,16 @@ enum spectre_v2_mitigation {
 extern void x86_spec_ctrl_set(u64);
 extern u64 x86_spec_ctrl_get_default(void);
 
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
index ec6df4448c94e6d140e0de3ecd8078c907673aed..0ab8ea1ef83beb3ea54eec0ec918f98e8f12dc06 100644 (file)
@@ -219,6 +219,26 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+       if (!ibrs_supported)
+               return;
+       if (ibrs_inuse || x86_spec_ctrl_base != guest_spec_ctrl)
+               wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+       if (!ibrs_supported)
+               return;
+       if (ibrs_inuse)
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
+       else if (x86_spec_ctrl_base != guest_spec_ctrl)
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
 /*
  * Disable retpoline and attempt to fall back to another Spectre v2 mitigation.
  * If possible, fall back to IBRS and IBPB.
index f424dd0c4b14bc08bd1bfc12a55edd69629dbe4f..439604faa3a4c722f9f8342b2ef0f6ab3e0f5a55 100644 (file)
@@ -3929,10 +3929,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        local_irq_enable();
 
-       if (ibrs_supported) {
-               if (ibrs_inuse || svm->spec_ctrl || x86_spec_ctrl_base)
-                       wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
-       }
+       x86_spec_ctrl_set_guest(svm->spec_ctrl);
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
                "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -4027,10 +4024,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        if (ibrs_supported) {
                rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
-               if (ibrs_inuse)
-                       native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
-               else if (svm->spec_ctrl)
-                       native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+               x86_spec_ctrl_restore_host(svm->spec_ctrl);
        }
 
        /* Eliminate branch target predictions from guest mode */
index 478e759f2fa996765ba6e994739731abe46218cc..9601f0af9a92f9a948d1af6f48bd6e2043fb9474 100644 (file)
@@ -8201,10 +8201,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        vmx->__launched = vmx->loaded_vmcs->launched;
 
-       if (ibrs_supported) {
-               if (ibrs_inuse || vmx->spec_ctrl || x86_spec_ctrl_base)
-                       wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-       }
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl);
 
        asm(
                /* Store host registers */
@@ -8333,10 +8330,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        if (ibrs_supported) {
                rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-               if (ibrs_inuse)
-                       native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
-               else if (vmx->spec_ctrl)
-                       native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+               x86_spec_ctrl_restore_host(vmx->spec_ctrl);
        }
 
        /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */