x86/KVM/VMX: Introduce per-host-cpu analogue of l1tf_flush_l1d
Author:     Nicolai Stange <nstange@suse.de>
AuthorDate: Fri, 27 Jul 2018 11:22:16 +0000 (13:22 +0200)
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CommitDate: Sat, 11 Aug 2018 00:44:49 +0000 (20:44 -0400)
Part of the L1TF mitigation for vmx includes flushing the L1D cache upon
VMENTRY.

L1D flushes are costly, and two modes of operation are provided to users:
"always" and the more selective "conditional" mode.

When operating in the latter, the cache gets flushed only if a host-side
code path considered unconfined has been traversed since the last flush.
"Unconfined" in this context means that the path might have pulled in
sensitive data like user data or kernel crypto keys.
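
As a rough sketch of that control flow (illustrative only; want_flush()
is hypothetical shorthand for the flag checks visible in the vmx.c hunk
below):

	static void vmx_l1d_flush_sketch(struct kvm_vcpu *vcpu)
	{
		/* "cond" mode: skip the costly flush unless an unconfined
		 * host code path ran since the last flush. */
		if (static_key_enabled(&vmx_l1d_flush_cond) && !want_flush(vcpu))
			return;

		/* "always" mode, or a flush is needed: flush L1D here. */
	}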

The need for L1D flushes is tracked by means of the per-vcpu flag
l1tf_flush_l1d. KVM exit handlers considered unconfined set it. A
vmx_l1d_flush() subsequently invoked before the next VMENTER will conduct
an L1D flush based on its value and reset the flag again.
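
For illustration only (the handler below is hypothetical, not part of
this patch), an unconfined exit handler marks its vCPU like so:

	static int handle_unconfined_exit_example(struct kvm_vcpu *vcpu)
	{
		/* This path may have pulled sensitive data into L1D:
		 * request a flush before the next VMENTER. */
		vcpu->arch.l1tf_flush_l1d = true;
		return 1;	/* resume guest execution */
	}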

Currently, interrupts delivered "normally" while in root operation between
VMEXIT and VMENTER are not taken into account. Part of the reason is that
such interrupts don't leave any traces, so the vmx code is unable to tell
whether any of them has happened.

As proposed by Paolo Bonzini, prepare for tracking all interrupts by
introducing a new per-cpu flag, "kvm_cpu_l1tf_flush_l1d". It is the
per-host-cpu analogue of the per-vcpu ->l1tf_flush_l1d.

A later patch will make interrupt handlers set it.
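
As a sketch of what that later patch is expected to do (the function
below is illustrative, not code from the series):

	static void irq_entry_example(struct pt_regs *regs)
	{
		/* Record that this CPU executed unconfined host code,
		 * so the next VMENTER on it performs an L1D flush. */
		kvm_set_cpu_l1tf_flush_l1d();

		/* ... regular interrupt dispatch ... */
	}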

For the sake of cache locality, group kvm_cpu_l1tf_flush_l1d into x86's
per-cpu irq_cpustat_t, as suggested by Peter Zijlstra.

Provide the helpers kvm_set_cpu_l1tf_flush_l1d(),
kvm_clear_cpu_l1tf_flush_l1d() and kvm_get_cpu_l1tf_flush_l1d(). For
!CONFIG_KVM_INTEL, make the first a trivial no-op and leave the other two
undefined, as appropriate.

Let vmx_l1d_flush() handle kvm_cpu_l1tf_flush_l1d in the same way as
l1tf_flush_l1d.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Orabug: 28220625
CVE: CVE-2018-3646

(cherry picked from commit 45b575c00d8e72d69d75dd8c112f044b7b01b069)

Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Conflicts:
	arch/x86/kvm/vmx.c
	Contextual: the surrounding code differs because this tree does not
	have all of the upstream static key features.

arch/x86/include/asm/hardirq.h
arch/x86/kvm/vmx.c

diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index edd280d39a313499d416b40786092f2c510b0bfd..4852ca0af9f5d45a7da3d93a16759de3539c073b 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -3,6 +3,7 @@
 
 #include <linux/threads.h>
 #include <linux/irq.h>
+#include <linux/uek_kabi.h>
 
 typedef struct {
        unsigned int __softirq_pending;
@@ -32,6 +33,9 @@ typedef struct {
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        unsigned int irq_hv_callback_count;
 #endif
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+       UEK_KABI_EXTEND(u8 kvm_cpu_l1tf_flush_l1d)
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -56,4 +60,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu);
 extern u64 arch_irq_stat(void);
 #define arch_irq_stat          arch_irq_stat
 
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+{
+       __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
+}
+
+static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
+{
+       __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
+}
+
+static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
+{
+       return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
+}
+#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
+static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
+
 #endif /* _ASM_X86_HARDIRQ_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 080865d2880ed3b254a75b81fb53e5937aa5b98c..622b5bbf9d734ca8b41e4ca6386ec61d693f8748 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8046,14 +8046,23 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
         * 'always'
         */
        if (likely(static_key_enabled(&vmx_l1d_flush_cond))) {
-               bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
+               bool flush_l1d;
 
                /*
-                * Clear the flush bit, it gets set again either from
-                * vcpu_run() or from one of the unsafe VMEXIT
-                * handlers.
+                * Clear the per-vcpu flush bit, it gets set again
+                * either from vcpu_run() or from one of the unsafe
+                * VMEXIT handlers.
                 */
+               flush_l1d = vcpu->arch.l1tf_flush_l1d;
                vcpu->arch.l1tf_flush_l1d = false;
+
+               /*
+                * Clear the per-cpu flush bit, it gets set again from
+                * the interrupt handlers.
+                */
+               flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+               kvm_clear_cpu_l1tf_flush_l1d();
+
                if (!flush_l1d)
                        return;
        }