x86/svm: Set IBPB when running a different VCPU
author Paolo Bonzini <pbonzini@redhat.com>
Thu, 30 Nov 2017 14:00:14 +0000 (15:00 +0100)
committer Kirtikar Kashyap <kirtikar.kashyap@oracle.com>
Fri, 12 Jan 2018 18:19:57 +0000 (10:19 -0800)
[RHEL7.5 PATCH 09/35] x86/svm: Set IBPB when running a different VCPU

Set IBPB (Indirect Branch Prediction Barrier) when the current CPU is
going to run a VCPU different from what was previously run.  Nested
virtualization uses the same VMCB for the second level guest, but the
L1 hypervisor should be using IBRS to protect itself.
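
To illustrate the bookkeeping this patch introduces, here is a rough
user-space sketch (not kernel code; cpu_state, ibpb() and load_vcpu() are
made-up stand-ins for the per-CPU svm_cpu_data, the MSR_IA32_PRED_CMD
write and svm_vcpu_load() in the diff below): each physical CPU remembers
the last VMCB it ran, and the barrier is issued only when that pointer
changes.

  #include <stdio.h>

  /* Stand-in for the per-CPU svm_cpu_data: remembers which VMCB this
   * CPU ran last. */
  struct cpu_state {
          const void *current_vmcb;
  };

  /* Stand-in for wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB): flush
   * indirect branch predictions before the next guest runs. */
  static void ibpb(void)
  {
          puts("IBPB issued");
  }

  /* Mirrors the svm_vcpu_load() hunk: barrier only when the VMCB about
   * to run differs from the one this CPU ran last. */
  static void load_vcpu(struct cpu_state *cpu, const void *vmcb)
  {
          if (cpu->current_vmcb != vmcb) {
                  cpu->current_vmcb = vmcb;
                  ibpb();
          }
  }

  int main(void)
  {
          struct cpu_state cpu = { 0 };
          int vmcb_a, vmcb_b;

          load_vcpu(&cpu, &vmcb_a);  /* new VMCB on this CPU -> IBPB */
          load_vcpu(&cpu, &vmcb_a);  /* same VMCB            -> no barrier */
          load_vcpu(&cpu, &vmcb_b);  /* VCPU switch          -> IBPB */
          return 0;
  }

The real patch performs the same comparison in svm_vcpu_load() and also
forces a barrier in svm_free_vcpu(), since a freed VMCB can be recycled
at the same address and would otherwise defeat the pointer check.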

Orabug: 27344012
CVE: CVE-2017-5715

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: John Haxby <john.haxby@oracle.com>
Signed-off-by: Kirtikar Kashyap <kirtikar.kashyap@oracle.com>
arch/x86/kvm/svm.c

index 0e9d78928966e2974e361940afeb017b0ad62503..8497d51babb0726dfddcaa18a0453d2ed0328c8f 100644
@@ -408,6 +408,8 @@ struct svm_cpu_data {
        struct kvm_ldttss_desc *tss_desc;
 
        struct page *save_area;
+
+       struct vmcb *current_vmcb;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -1296,11 +1298,19 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
+
+       /*
+        * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
+        * block speculative execution.
+        */
+       if (static_cpu_has(X86_FEATURE_IBPB_SUPPORT))
+               wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int i;
 
        if (unlikely(cpu != vcpu->cpu)) {
@@ -1323,6 +1333,12 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                __this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
                wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
        }
+       if (sd->current_vmcb != svm->vmcb) {
+               sd->current_vmcb = svm->vmcb;
+               if (static_cpu_has(X86_FEATURE_IBPB_SUPPORT))
+                       wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+       }
+
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2324,6 +2340,11 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        if (!nested_vmcb)
                return 1;
 
+       /*
+        * No need for IBPB here, the L1 hypervisor should be running with
+        * IBRS=1 and inserts one already when switching L2 VMs.
+        */
+
        /* Exit Guest-Mode */
        leave_guest_mode(&svm->vcpu);
        svm->nested.vmcb = 0;
@@ -2488,6 +2509,11 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        if (!nested_vmcb)
                return false;
 
+       /*
+        * No need for IBPB here, since the nested VM is less privileged.  The
+        * L1 hypervisor inserts one already when switching L2 VMs.
+        */
+
        if (!nested_vmcb_checks(nested_vmcb)) {
                nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
                nested_vmcb->control.exit_code_hi = 0;