From: Tom Lendacky
Date: Thu, 10 May 2018 20:06:39 +0000 (+0200)
Subject: KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
X-Git-Tag: v4.1.12-124.31.3~735
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=3bcbae0555f99cdc70de75e5283564093aadef79;p=users%2Fjedix%2Flinux-maple.git

KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD

Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM.  This will allow
guests to use SSBD on hardware that uses non-architectural mechanisms
for enabling SSBD.

[ tglx: Folded the migration fixup from Paolo Bonzini ]

Signed-off-by: Tom Lendacky
Signed-off-by: Thomas Gleixner

Orabug: 28063992
CVE: CVE-2018-3639

(cherry picked from commit bc226f07)
Signed-off-by: Mihai Carabas
Reviewed-by: Konrad Rzeszutek Wilk
Reviewed-by: Darren Kenny
Signed-off-by: Brian Maly

Conflicts:
	arch/x86/include/asm/kvm_host.h
	arch/x86/kernel/cpu/common.c
	arch/x86/kvm/cpuid.c
	arch/x86/kvm/svm.c
	arch/x86/kvm/vmx.c
	arch/x86/kvm/x86.c

[ We did not have cpu_has_high_real_mode_segbase entry at all. Also
  msr_info is not in this patchset, I will take care of it in Orabug:
  28069548 in a future patchset. ]

Signed-off-by: Brian Maly
---

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 868bcad618324..4b3cc2be72601 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -708,6 +708,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
+	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	/* Create, but do not attach this VCPU */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d293c8e4dd3d7..1890a4b4bb26a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -766,7 +766,8 @@ static void init_amd(struct cpuinfo_x86 *c)
 	/* AMD CPUs don't reset SS attributes on SYSRET */
 	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+	if (boot_cpu_has(X86_FEATURE_AMD_SSBD) ||
+	    cpu_has(c, X86_FEATURE_VIRT_SSBD)) {
 		set_cpu_cap(c, X86_FEATURE_SSBD);
 		set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
 	}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fb84a07d6bb3d..06e302a011e31 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -66,6 +66,7 @@ u64 kvm_supported_xcr0(void)
 
 /* CPUID[eax=0x80000008].ebx */
 #define KVM_CPUID_BIT_IBPB_SUPPORT	12
+#define KVM_CPUID_BIT_VIRT_SSBD		25
 
 #define KF(x) bit(KVM_CPUID_BIT_##x)
 
@@ -370,7 +371,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_80000008_ebx_x86_features =
-		KF(IBPB_SUPPORT);
+		KF(IBPB_SUPPORT) | KF(VIRT_SSBD);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -600,9 +601,17 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
+
+		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+			entry->ebx |= KF(VIRT_SSBD);
+
 		entry->ebx &= kvm_cpuid_80000008_ebx_x86_features;
+
 		if ( !boot_cpu_has(X86_FEATURE_IBPB) )
-			entry->ebx &= !(1u << KVM_CPUID_BIT_IBPB_SUPPORT);
+			entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB_SUPPORT);
+
+		if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+			entry->ebx |= KF(VIRT_SSBD);
 		break;
 	}
 	case 0x80000019:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d03593025f2f3..fd56cdaf6496b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3173,6 +3173,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	case MSR_IA32_SPEC_CTRL:
 		*data = svm->spec_ctrl;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		*data = svm->virt_spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		*data = 0x01000065;
 		break;
@@ -3291,6 +3294,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_SPEC_CTRL:
 		svm->spec_ctrl = data;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -4149,6 +4158,11 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
+static bool svm_has_emulated_msr(int index)
+{
+	return true;
+}
+
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	return 0;
@@ -4424,6 +4438,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f2fa1162751e8..e6931417a1571 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8018,6 +8018,17 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 }
 
+static bool vmx_has_emulated_msr(int index)
+{
+	switch (index) {
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		/* This is AMD only. */
+		return false;
+	default:
+		return true;
+	}
+}
+
 static bool vmx_mpx_supported(void)
 {
 	return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
@@ -10254,6 +10265,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
+	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
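
For reference, a minimal guest-side sketch (not part of the commit) of
the mechanism this patch emulates. A guest that sees CPUID
0x80000008:EBX bit 25 (VIRT_SSBD, per KVM_CPUID_BIT_VIRT_SSBD above)
may toggle SSBD by writing the virtualized MSR; svm_set_msr() rejects
any bit other than SPEC_CTRL_SSBD by returning 1, which the guest
typically observes as a #GP. The MSR index (0xc001011f) and bit
position (bit 2) match the kernel's MSR_AMD64_VIRT_SPEC_CTRL and
SPEC_CTRL_SSBD definitions; the helper names below are hypothetical.

#include <stdint.h>

#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
#define SPEC_CTRL_SSBD			(1ULL << 2)

/* Read a 64-bit MSR (EDX:EAX pair) selected by ECX. */
static inline uint64_t rdmsr64(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((uint64_t)hi << 32) | lo;
}

/* Write a 64-bit MSR; setting any bit other than SPEC_CTRL_SSBD in
 * VIRT_SPEC_CTRL would be refused by the hypervisor's svm_set_msr(). */
static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr" : : "c" (msr),
		     "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
}

/* Enable SSBD, preserving the (currently reserved) remaining bits. */
static void guest_enable_ssbd(void)
{
	wrmsr64(MSR_AMD64_VIRT_SPEC_CTRL,
		rdmsr64(MSR_AMD64_VIRT_SPEC_CTRL) | SPEC_CTRL_SSBD);
}

Because the MSR is fully emulated (svm->virt_spec_ctrl is the backing
storage read back by svm_get_msr() above), this works even on hosts
where the real SSBD control is non-architectural.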