From: Konrad Rzeszutek Wilk Date: Thu, 26 Apr 2018 02:04:19 +0000 (-0400) Subject: x86/bugs, KVM: Support the combination of guest and host IBRS X-Git-Tag: v4.1.12-124.31.3~778 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=817ca6d307dba517280f5debe57b20793c63f158;p=users%2Fjedix%2Flinux-maple.git x86/bugs, KVM: Support the combination of guest and host IBRS A guest may modify the SPEC_CTRL MSR from the value used by the kernel. Since the kernel doesn't use IBRS, this means a value of zero is what is needed in the host. But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to the other bits as reserved so the kernel should respect the boot time SPEC_CTRL value and use that. This allows us to deal with future extensions to the SPEC_CTRL interface if any at all. Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any difference as paravirt will over-write the callq *0xfff.. with the wrmsrl assembler code. OraBug: 28041771 CVE: CVE-2018-3639 Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Ingo Molnar (cherry picked from commit 5cf687548705412da47c9cec342fd952d71ed3d5) Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Mihai Carabas Conflicts: arch/x86/kvm/svm.c arch/x86/kvm/vmx.c [We need to preserve the check for ibrs_inuse - which we can do now in the functions] Signed-off-by: Brian Maly --- diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index abc0e887e064..d2997f2b9693 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -183,6 +183,16 @@ enum spectre_v2_mitigation { extern void x86_spec_ctrl_set(u64); extern u64 x86_spec_ctrl_get_default(void); +/* + * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR + * the guest has, while on VMEXIT we restore the host view. 
This + * would be easier if SPEC_CTRL were architecturally maskable or + * shadowable for guests but this is not (currently) the case. + * Takes the guest view of SPEC_CTRL MSR as a parameter. + */ +extern void x86_spec_ctrl_set_guest(u64); +extern void x86_spec_ctrl_restore_host(u64); + extern char __indirect_thunk_start[]; extern char __indirect_thunk_end[]; diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c index ec6df4448c94..0ab8ea1ef83b 100644 --- a/arch/x86/kernel/cpu/bugs_64.c +++ b/arch/x86/kernel/cpu/bugs_64.c @@ -219,6 +219,26 @@ u64 x86_spec_ctrl_get_default(void) } EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); +void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) +{ + if (!ibrs_supported) + return; + if (ibrs_inuse || x86_spec_ctrl_base != guest_spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); +} +EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); + +void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) +{ + if (!ibrs_supported) + return; + if (ibrs_inuse) + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv); + else if (x86_spec_ctrl_base != guest_spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); +} +EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); + /* * Disable retpoline and attempt to fall back to another Spectre v2 mitigation. * If possible, fall back to IBRS and IBPB. 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f424dd0c4b14..439604faa3a4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3929,10 +3929,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); - if (ibrs_supported) { - if (ibrs_inuse || svm->spec_ctrl || x86_spec_ctrl_base) - wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); - } + x86_spec_ctrl_set_guest(svm->spec_ctrl); asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" @@ -4027,10 +4024,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (ibrs_supported) { rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); - if (ibrs_inuse) - native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv); - else if (svm->spec_ctrl) - native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + + x86_spec_ctrl_restore_host(svm->spec_ctrl); } /* Eliminate branch target predictions from guest mode */ diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 478e759f2fa9..9601f0af9a92 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8201,10 +8201,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->__launched = vmx->loaded_vmcs->launched; - if (ibrs_supported) { - if (ibrs_inuse || vmx->spec_ctrl || x86_spec_ctrl_base) - wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); - } + x86_spec_ctrl_set_guest(vmx->spec_ctrl); asm( /* Store host registers */ @@ -8333,10 +8330,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (ibrs_supported) { rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); - if (ibrs_inuse) - native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv); - else if (vmx->spec_ctrl) - native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + + x86_spec_ctrl_restore_host(vmx->spec_ctrl); } /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */