x86/KVM/VMX: Add L1D MSR based flush
author:    Paolo Bonzini <pbonzini@redhat.com>
           Mon, 2 Jul 2018 11:03:48 +0000 (13:03 +0200)
committer: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
           Sat, 11 Aug 2018 00:44:39 +0000 (20:44 -0400)
336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
(IA32_FLUSH_CMD aka 0x10B) which has similar write-only semantics to other
MSRs defined in the document.

The semantics of this MSR are to allow "finer granularity invalidation of
caching structures than existing mechanisms like WBINVD. It will write back
and invalidate the L1 data cache, including all cachelines brought in by
preceding instructions, without invalidating all caches (e.g. L2 or
LLC). Some processors may also invalidate the first level instruction
cache on an L1D_FLUSH command. The L1 data and instruction caches may be
shared across the logical processors of a core."

Use it instead of the loop-based L1 flush algorithm.
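
For reference, a minimal sketch of the resulting flush path (illustrative
only, grounded in the diff below; the helper name is not from the patch,
and the kernel's <asm/msr.h> and <asm/cpufeature.h> definitions are
assumed):

	#include <linux/types.h>
	#include <asm/cpufeature.h>
	#include <asm/msr.h>

	/*
	 * Issue the architectural L1D flush when the CPU enumerates it;
	 * report whether the caller still needs the loop-based software
	 * flush as a fallback.
	 */
	static bool l1d_flush_via_msr(void)
	{
		if (!static_cpu_has(X86_FEATURE_FLUSH_L1D))
			return false;
		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
		return true;
	}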

A copy of this document is available at
   https://bugzilla.kernel.org/show_bug.cgi?id=199511

[ tglx: Avoid allocating pages when the MSR is available ]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Orabug: 28220674
CVE: CVE-2018-3646

(cherry picked from commit 3fa045be4c720146b18a19cea7a767dc6ad5df94)

Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Conflicts:
arch/x86/include/asm/msr-index.h
arch/x86/kvm/vmx.c
Contextual: different content; arch/x86/include/uapi/asm/msr-index.h
was modified instead of arch/x86/include/asm/msr-index.h, which does
not exist in this version.

arch/x86/include/uapi/asm/msr-index.h
arch/x86/kvm/vmx.c

diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index aba09d9ffbe915b8ce75f063d23eb18b99f705b1..11e2755a3815097bb532d55d9c818f66dc050740 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
                                                    * required.
                                                    */
 
+#define MSR_IA32_FLUSH_CMD             0x0000010b
+#define L1D_FLUSH                      (1 << 0)   /*
+                                                   * Writeback and invalidate the
+                                                   * L1 data cache.
+                                                   */
+
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 78598bacee1ac321f55c90126d552ce874ddc5d4..4f80c38f37b4d4cb1f5f55eb7be4bd89f9d739f1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7927,6 +7927,11 @@ static void __maybe_unused vmx_l1d_flush(void)
 {
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
+       if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+               wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+               return;
+       }
+
        asm volatile(
                /* First ensure the pages are in the TLB */
                "xorl   %%eax, %%eax\n"
@@ -10549,11 +10554,13 @@ static int __init vmx_setup_l1d_flush(void)
            !boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;
 
-       page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-       if (!page)
-               return -ENOMEM;
+       if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+               page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+               if (!page)
+                       return -ENOMEM;
+               vmx_l1d_flush_pages = page_address(page);
+       }
 
-       vmx_l1d_flush_pages = page_address(page);
        static_key_enable(&vmx_l1d_should_flush);
        return 0;
 }
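
For completeness, a hedged userspace sketch (not part of the patch) that
checks whether a CPU enumerates the new MSR. Per the Intel document cited
above, support is advertised in CPUID.(EAX=7,ECX=0):EDX bit 28, which is
the bit X86_FEATURE_FLUSH_L1D maps to:

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/*
		 * Leaf 7, subleaf 0: EDX[28] enumerates IA32_FLUSH_CMD
		 * with the L1D_FLUSH command bit (bit position taken
		 * from the speculative execution mitigations document).
		 */
		if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
		    (edx & (1u << 28)))
			printf("L1D_FLUSH enumerated\n");
		else
			printf("L1D_FLUSH not enumerated\n");
		return 0;
	}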