return xcr0;
}
+#define F(x) bit(X86_FEATURE_##x)
+
+/*
+ * These are scattered features in cpufeatures.h; architecturally they
+ * are CPUID[eax=7,ecx=0].edx bits.
+ */
+#define KVM_CPUID_BIT_IBRS 26
+#define KVM_CPUID_BIT_STIBP 27
+#define KVM_CPUID_BIT_IA32_ARCH_CAPS 29
+#define KVM_CPUID_BIT_SSBD 31
+
+/* CPUID[eax=0x80000008].ebx */
+#define KVM_CPUID_BIT_IBPB_SUPPORT 12
+#define KVM_CPUID_BIT_VIRT_SSBD 25
+
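+/* KF() builds the CPUID feature mask for one of the KVM_CPUID_BIT_* numbers above. */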
+#define KF(x) bit(KVM_CPUID_BIT_##x)
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_80000008_ebx_x86_features =
- KF(IBPB) | KF(VIRT_SSBD);
+ KF(IBPB_SUPPORT) | KF(VIRT_SSBD);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
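+	/* Expose only IBPB and VIRT_SSBD to the guest in leaf 0x80000008 EBX. */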
entry->ebx &= kvm_cpuid_80000008_ebx_x86_features;
if (!boot_cpu_has(X86_FEATURE_IBPB))
- entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB);
+ entry->ebx &= ~(1u << KVM_CPUID_BIT_IBPB_SUPPORT);
if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
entry->ebx |= KF(VIRT_SSBD);
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_MPX));
}
-
-#define F(x) bit(X86_FEATURE_##x)
-#define KF(x) bit(KVM_CPUID_BIT_##x)
-
-/* These are scattered features in cpufeatures.h. */
-#define KVM_CPUID_BIT_IBPB 12
-#define KVM_CPUID_BIT_VIRT_SSBD 25
-#define KVM_CPUID_BIT_IBRS 26
-#define KVM_CPUID_BIT_STIBP 27
-#define KVM_CPUID_BIT_IA32_ARCH_CAPS 29
-#define KVM_CPUID_BIT_SSBD 31
-
-static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- return best && (best->ebx & KF(IBPB));
-}
-
-static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->edx & KF(IBRS));
-}
#endif
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
{ .index = MSR_IA32_SPEC_CTRL, .always = true },
- { .index = MSR_IA32_PRED_CMD, .always = false },
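+	/* Pass PRED_CMD straight through to the guest; no lazy interception toggle. */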
+ { .index = MSR_IA32_PRED_CMD, .always = true },
{ .index = MSR_INVALID, .always = false },
};
svm->spec_ctrl = data;
break;
case MSR_IA32_PRED_CMD:
- if (!msr->host_initiated &&
- !guest_cpuid_has_ibpb(vcpu))
- return 1;
-
if (data & ~FEATURE_SET_IBPB)
return 1;
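+		/* Issue the barrier on the host CPU on the guest's behalf. */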
if (ibpb_inuse)
wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
-
- if (is_guest_mode(vcpu))
- break;
- set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (data & ~SPEC_CTRL_SSBD)
static int alloc_identity_pagetable(struct kvm *kvm);
static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
return *p;
}
-/*
- * Check if MSR is intercepted for L01 MSR bitmap.
- */
-static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
-{
- unsigned long *msr_bitmap;
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return true;
-
- msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
-
- if (msr <= 0x1fff) {
- return !!test_bit(msr, msr_bitmap + 0x800 / f);
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- return !!test_bit(msr, msr_bitmap + 0xc00 / f);
- }
-
- return true;
-}
-
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
to_vmx(vcpu)->spec_ctrl = data;
break;
case MSR_IA32_PRED_CMD:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has_ibpb(vcpu) &&
- !guest_cpuid_has_ibrs(vcpu))
- return 1;
-
if (data & ~FEATURE_SET_IBPB)
return 1;
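+		/* As on SVM, reflect the barrier to the host CPU when IBPB is in use. */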
if (ibpb_inuse)
wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
-
- /*
- * For non-nested:
- * When it's written (to non-zero) for the first time, pass
- * it through.
- *
- * For nested:
- * The handling of the MSR bitmap for L2 guests is done in
- * nested_vmx_merge_msr_bitmap. We should not touch the
- * vmcs02.msr_bitmap here since it gets completely overwritten
- * in the merging.
- */
- vmx_disable_intercept_for_msr(to_vmx(vcpu)->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
- MSR_TYPE_W);
break;
case MSR_IA32_ARCH_CAPABILITIES:
vmx->arch_capabilities = data;
unsigned long *msr_bitmap_l1;
unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
- /*
- * pred_cmd is trying to verify two things:
- *
- * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
- * ensures that we do not accidentally generate an L02 MSR bitmap
- * from the L12 MSR bitmap that is too permissive.
- * 2. That L1 or L2s have actually used the MSR. This avoids
- * unnecessarily merging of the bitmap if the MSR is unused. This
- * works properly because we only update the L01 MSR bitmap lazily.
- * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
- * updated to reflect this when L1 (or its L2s) actually write to
- * the MSR.
- */
- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
-
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
- !pred_cmd)
+	/*
+	 * This shortcut is ok because we support only x2APIC MSRs so far.
+	 * Without virtualized x2APIC mode there is nothing to merge, and
+	 * returning false makes L2 run with all MSR accesses intercepted.
+	 */
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap);
MSR_TYPE_W);
}
}
-
- if (pred_cmd)
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_PRED_CMD,
- MSR_TYPE_W);
-
kunmap(page);
nested_release_page_clean(page);