From: Konrad Rzeszutek Wilk Date: Thu, 1 Feb 2018 20:47:19 +0000 (-0500) Subject: KVM: VMX: Allow direct access to MSR_IA32_SPEC_CTRL X-Git-Tag: v4.1.12-124.31.3~1163 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=775acc5d4a1b82f587c55f0ab442b05e07dd0ac8;p=users%2Fjedix%2Flinux-maple.git KVM: VMX: Allow direct access to MSR_IA32_SPEC_CTRL This is an adaptation of what is being posted upstream. The issue here is that guest kernels such as Windows need IBRS for their spectre_v2 mitigation. And since our kernel can do either IBRS or retpoline we need to be aware of both. (We are ignoring for now the situation in which the microcode is not loaded and we want to emulate IBRS) In short: 1) If the CPU has microcode, and the host uses IBRS, we need to save the MSR during VMEXIT and write our IBRS value. Also before VMENTER we need to write the guest MSR value. 2) If the host kernel uses retpoline we need to WRMSR the guest MSR value on VMENTER, but we are optimizing by only doing it if it is a non-zero value. On VMEXIT we read the guest MSR value. 
Orabug: 27477743 CVE: CVE-2017-5715 Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Pavel Tatashin --- diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c58f32463dda..d2392f8affba 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -442,6 +442,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, // TSC_ADJUST is emulated entry->ebx |= F(TSC_ADJUST); entry->edx &= kvm_cpuid_7_0_edx_x86_features; + /* Aka !ibrs_supported and !ibpb_supported */ if ( !boot_cpu_has(X86_FEATURE_SPEC_CTRL) ) entry->edx &= !(1u << KVM_CPUID_BIT_SPEC_CTRL); if ( !boot_cpu_has(X86_FEATURE_STIPB) ) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e8eff903bd43..4bbde9861259 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3932,10 +3932,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); - if (ibrs_inuse && - svm->spec_ctrl != SPEC_CTRL_FEATURE_ENABLE_IBRS) - wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); - + if (ibrs_supported) { + if (ibrs_inuse || svm->spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); + } asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" @@ -4028,9 +4028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); - if (ibrs_inuse) { + if (ibrs_supported) { rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); - if (svm->spec_ctrl != SPEC_CTRL_FEATURE_ENABLE_IBRS) + if (ibrs_inuse) wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1373b8e2003a..b8465bb9e54c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8193,8 +8193,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->__launched = vmx->loaded_vmcs->launched; - if (ibrs_inuse) - wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + if (ibrs_supported) { + if (ibrs_inuse || vmx->spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + } asm( /* Store host registers */ @@ -8318,14 +8320,15 @@ static void 
__noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); - if (ibrs_inuse) { - rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); - wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS); - } - /* Eliminate branch target predictions from guest mode */ vmexit_fill_RSB(); + if (ibrs_supported) { + rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + if (ibrs_inuse) + wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS); + } + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr);