From: Boris Ostrovsky
Date: Tue, 26 Mar 2019 22:46:00 +0000 (-0400)
Subject: Revert "KVM/x86: Add IBPB support"
X-Git-Tag: v4.1.12-124.31.3~224
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=13d89de850838d5d80545e4d26890cd51cdc260e;p=users%2Fjedix%2Flinux-maple.git

Revert "KVM/x86: Add IBPB support"

This reverts commit e5455ef7dbbab7ee5bc901ffdc7666e61fc41e11.

Revert due to performance regression.

Orabug: 29542029

Signed-off-by: Boris Ostrovsky
Reviewed-by: Mihai Carabas
Signed-off-by: Brian Maly
---

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index feda28e6f539..58bfdbd02a61 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -56,6 +56,20 @@ u64 kvm_supported_xcr0(void)
 	return xcr0;
 }
 
+#define F(x) bit(X86_FEATURE_##x)
+
+/* These are scattered features in cpufeatures.h. */
+#define KVM_CPUID_BIT_IBRS 26
+#define KVM_CPUID_BIT_STIBP 27
+#define KVM_CPUID_BIT_IA32_ARCH_CAPS 29
+#define KVM_CPUID_BIT_SSBD 31
+
+
+/* CPUID[eax=0x80000008].ebx */
+#define KVM_CPUID_BIT_IBPB_SUPPORT 12
+#define KVM_CPUID_BIT_VIRT_SSBD 25
+
+#define KF(x) bit(KVM_CPUID_BIT_##x)
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 {
@@ -358,7 +372,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_80000008_ebx_x86_features =
-		KF(IBPB) | KF(VIRT_SSBD);
+		KF(IBPB_SUPPORT) | KF(VIRT_SSBD);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -595,7 +609,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		entry->ebx &= kvm_cpuid_80000008_ebx_x86_features;
 
 		if ( !boot_cpu_has(X86_FEATURE_IBPB) )
-			entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB);
+			entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB_SUPPORT);
 
 		if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
 			entry->ebx |= KF(VIRT_SSBD);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 97f1c35eb761..496b3695d3d3 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -125,31 +125,4 @@ static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	return best && (best->ebx & bit(X86_FEATURE_MPX));
 }
-
-#define F(x) bit(X86_FEATURE_##x)
-#define KF(x) bit(KVM_CPUID_BIT_##x)
-
-/* These are scattered features in cpufeatures.h. */
-#define KVM_CPUID_BIT_IBPB 12
-#define KVM_CPUID_BIT_VIRT_SSBD 25
-#define KVM_CPUID_BIT_IBRS 26
-#define KVM_CPUID_BIT_STIBP 27
-#define KVM_CPUID_BIT_IA32_ARCH_CAPS 29
-#define KVM_CPUID_BIT_SSBD 31
-
-static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	return best && (best->ebx & KF(IBPB));
-}
-
-static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->edx & KF(IBRS));
-}
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 09f118462999..b5195e02c549 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -194,7 +194,7 @@ static const struct svm_direct_access_msrs {
 	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
 	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
 	{ .index = MSR_IA32_SPEC_CTRL,		.always = true },
-	{ .index = MSR_IA32_PRED_CMD,		.always = false },
+	{ .index = MSR_IA32_PRED_CMD,		.always = true },
 	{ .index = MSR_INVALID,			.always = false },
 };
 
@@ -3304,10 +3304,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->spec_ctrl = data;
 		break;
 	case MSR_IA32_PRED_CMD:
-		if (!msr->host_initiated &&
-		    !guest_cpuid_has_ibpb(vcpu))
-			return 1;
-
 		if (data & ~FEATURE_SET_IBPB)
 			return 1;
 
@@ -3316,10 +3312,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		if (ibpb_inuse)
 			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
-
-		if (is_guest_mode(vcpu))
-			break;
-
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
 		if (data & ~SPEC_CTRL_SSBD)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e5a13224b919..9c83350297c5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -986,8 +986,6 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
 
 static int alloc_identity_pagetable(struct kvm *kvm);
 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
-							  u32 msr, int type);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -1726,29 +1724,6 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
 	return *p;
 }
 
-/*
- * Check if MSR is intercepted for L01 MSR bitmap.
- */
-static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
-{
-	unsigned long *msr_bitmap;
-	int f = sizeof(unsigned long);
-
-	if (!cpu_has_vmx_msr_bitmap())
-		return true;
-
-	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
-
-	if (msr <= 0x1fff) {
-		return !!test_bit(msr, msr_bitmap + 0x800 / f);
-	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
-		msr &= 0x1fff;
-		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
-	}
-
-	return true;
-}
-
 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
@@ -2959,11 +2934,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		to_vmx(vcpu)->spec_ctrl = data;
 		break;
 	case MSR_IA32_PRED_CMD:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_ibpb(vcpu) &&
-		    !guest_cpuid_has_ibrs(vcpu))
-			return 1;
-
 		if (data & ~FEATURE_SET_IBPB)
 			return 1;
 
@@ -2972,20 +2942,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		if (ibpb_inuse)
 			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
-
-		/*
-		 * For non-nested:
-		 * When it's written (to non-zero) for the first time, pass
-		 * it through.
-		 *
-		 * For nested:
-		 * The handling of the MSR bitmap for L2 guests is done in
-		 * nested_vmx_merge_msr_bitmap. We should not touch the
-		 * vmcs02.msr_bitmap here since it gets completely overwritten
-		 * in the merging.
-		 */
-		vmx_disable_intercept_for_msr(to_vmx(vcpu)->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
-					      MSR_TYPE_W);
 		break;
 	case MSR_IA32_ARCH_CAPABILITIES:
 		vmx->arch_capabilities = data;
@@ -9117,23 +9073,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 	unsigned long *msr_bitmap_l1;
 	unsigned long *msr_bitmap_l0 =
 		to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
-	/*
-	 * pred_cmd is trying to verify two things:
-	 *
-	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
-	 *    ensures that we do not accidentally generate an L02 MSR bitmap
-	 *    from the L12 MSR bitmap that is too permissive.
-	 * 2. That L1 or L2s have actually used the MSR. This avoids
-	 *    unnecessarily merging of the bitmap if the MSR is unused. This
-	 *    works properly because we only update the L01 MSR bitmap lazily.
-	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
-	 *    updated to reflect this when L1 (or its L2s) actually write to
-	 *    the MSR.
-	 */
-	bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
-
-	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
-	    !pred_cmd)
+	/* This shortcut is ok because we support only x2APIC MSRs so far. */
+	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
 		return false;
 
 	page = nested_get_page(vcpu, vmcs12->msr_bitmap);
@@ -9173,13 +9114,6 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 					MSR_TYPE_W);
 		}
 	}
-
-	if (pred_cmd)
-		nested_vmx_disable_intercept_for_msr(
-				msr_bitmap_l1, msr_bitmap_l0,
-				MSR_IA32_PRED_CMD,
-				MSR_TYPE_W);
-
 	kunmap(page);
 	nested_release_page_clean(page);
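
A note on the guest-visible effect of the cpuid.c hunks: KVM advertises IBPB to the guest in CPUID[eax=0x80000008].ebx bit 12 (KVM_CPUID_BIT_IBPB_SUPPORT above) and clears that bit when the host lacks X86_FEATURE_IBPB. A minimal user-space sketch of how a guest could probe these bits follows; it is illustrative only, and the EBX_* macro names are invented here, not taken from the patch.

#include <cpuid.h>
#include <stdio.h>

/* Feature bits in CPUID[eax=0x80000008].ebx used by the patch above. */
#define EBX_IBPB	(1u << 12)	/* KVM_CPUID_BIT_IBPB_SUPPORT */
#define EBX_VIRT_SSBD	(1u << 25)	/* KVM_CPUID_BIT_VIRT_SSBD */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 when the requested leaf is out of range. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x80000008 not available\n");
		return 1;
	}
	printf("IBPB:      %s\n", (ebx & EBX_IBPB) ? "yes" : "no");
	printf("VIRT_SSBD: %s\n", (ebx & EBX_VIRT_SSBD) ? "yes" : "no");
	return 0;
}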
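The msr_write_intercepted_l01() helper removed from vmx.c encodes the VMX MSR-bitmap layout: a 4 KB bitmap holding four 1 KB quarters (read-low at offset 0x000, read-high at 0x400, write-low at 0x800, write-high at 0xc00), where "low" covers MSRs 0x00000000-0x00001fff and "high" covers 0xc0000000-0xc0001fff. A freestanding sketch of the same write-intercept test, assuming a plain byte array in place of vmcs01.msr_bitmap:

#include <stdbool.h>
#include <stdint.h>

/*
 * Return true if a guest write to 'msr' would be intercepted (cause a
 * VM exit) according to the given VMX MSR bitmap.  MSRs outside the
 * two architecturally covered ranges are always intercepted.
 */
static bool msr_write_intercepted(const uint8_t bitmap[4096], uint32_t msr)
{
	uint32_t base;

	if (msr <= 0x1fff)
		base = 0x800;		/* write bitmap, low MSR range */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base = 0xc00;		/* write bitmap, high MSR range */
	else
		return true;

	msr &= 0x1fff;
	return bitmap[base + msr / 8] & (1u << (msr % 8));
}

The reverted code used this test so that the PRED_CMD intercept was merged into the L02 bitmap only once L1 had actually been given passthrough. With the revert, the SVM side makes PRED_CMD passthrough unconditional (.always = true in svm.c), and the VMX side no longer toggles its MSR bitmap lazily on the first guest write.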