www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
x86/KVM/VMX: Move the l1tf_flush_l1d test to vmx_l1d_flush()
Author: Nicolai Stange <nstange@suse.de>
Sat, 21 Jul 2018 20:35:28 +0000 (22:35 +0200)
Committer: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Sat, 11 Aug 2018 00:44:49 +0000 (20:44 -0400)
Currently, vmx_vcpu_run() checks if l1tf_flush_l1d is set and invokes
vmx_l1d_flush() if so.

This test is unnecessary for the "always flush L1D" mode.

Move the check to vmx_l1d_flush()'s conditional mode code path.

Notes:
- vmx_l1d_flush() is likely to get inlined anyway and thus, there's no
  extra function call.

- This inverts the (static) branch prediction, but there hadn't been any
  explicit likely()/unlikely() annotations before and so it stays as is.

Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Orabug: 28220625
CVE: CVE-2018-3646

(cherry picked from commit 5b6ccc6c3b1a477fbac9ec97a0b4c1c48e765209)

Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Conflicts:
arch/x86/kvm/vmx.c
Contextual: different content caused by not having all static key features

arch/x86/kvm/vmx.c

index d7dcd2d064e96630ee8d95896bcf642db8b9a8fc..080865d2880ed3b254a75b81fb53e5937aa5b98c 100644 (file)
@@ -8046,12 +8046,16 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
         * 'always'
         */
        if (likely(static_key_enabled(&vmx_l1d_flush_cond))) {
+               bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
+
                /*
                 * Clear the flush bit, it gets set again either from
                 * vcpu_run() or from one of the unsafe VMEXIT
                 * handlers.
                 */
                vcpu->arch.l1tf_flush_l1d = false;
+               if (!flush_l1d)
+                       return;
        }
 
        vcpu->stat.l1d_flush++;
@@ -8501,10 +8505,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
-       if (unlikely(static_key_enabled(&vmx_l1d_should_flush))) {
-               if (vcpu->arch.l1tf_flush_l1d)
-                       vmx_l1d_flush(vcpu);
-       }
+       if (unlikely(static_key_enabled(&vmx_l1d_should_flush)))
+               vmx_l1d_flush(vcpu);
 
        asm(
                /* Store host registers */