struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
+
+ struct vmcb *current_vmcb;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
+
+ /*
+ * The VMCB page could be recycled for a new vCPU, causing a false
+ * negative in the svm_vcpu_load() check below; flush the branch
+ * predictor with an IBPB now so stale predictions cannot carry over.
+ */
+ if (static_cpu_has(X86_FEATURE_IBPB_SUPPORT))
+ wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
}
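For reference, a minimal sketch of the two definitions this hunk relies on, assuming the series defines the IBPB command as bit 0 of the prediction-command MSR (architecturally MSR 0x49):

#define MSR_IA32_PRED_CMD	0x00000049	/* Prediction Command MSR */
#define FEATURE_SET_IBPB	(1UL << 0)	/* write to trigger an IBPB */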
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
__this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
}
+ if (sd->current_vmcb != svm->vmcb) {
+ sd->current_vmcb = svm->vmcb;
+ if (static_cpu_has(X86_FEATURE_IBPB_SUPPORT))
+ wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+ }
}
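The feature check plus wrmsrl pair now appears in both the free and load paths; a hypothetical helper (svm_ibpb() is not part of this patch, and later kernels carry a similar indirect_branch_prediction_barrier() helper) could factor it out:

/* Hypothetical wrapper for the barrier sequence used above. */
static inline void svm_ibpb(void)
{
	if (static_cpu_has(X86_FEATURE_IBPB_SUPPORT))
		wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
}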
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
if (!nested_vmcb)
return 1;
+ /*
+ * No need for an IBPB here: the L1 hypervisor should be running with
+ * IBRS=1 and already issues one of its own when switching L2 VMs.
+ */
+
/* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu);
svm->nested.vmcb = 0;
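For context, "running with IBRS=1" means the L1 guest keeps the IBRS bit set in its speculation-control MSR; a minimal sketch, assuming the usual names for MSR 0x48 and its bit 0 (SPEC_CTRL_IBRS):

/* L1 protecting itself: IBRS is bit 0 of MSR_IA32_SPEC_CTRL (0x48). */
wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);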
if (!nested_vmcb)
return false;
+ /*
+ * No need for an IBPB here, since the nested VM is less privileged. The
+ * L1 hypervisor already issues one when switching L2 VMs.
+ */
+
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
nested_vmcb->control.exit_code_hi = 0;