 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               (1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              (1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3)   /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO                        (1 << 4)   /*
                                                    * Not susceptible to Speculative Store Bypass
                                                    * attack, so no Speculative Store Bypass
                                                    * control required.
                                                    */
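
These capability bits live in the IA32_ARCH_CAPABILITIES MSR (0x10a), which only exists when CPUID.(EAX=7,ECX=0):EDX bit 29 is set; the boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) test later in this patch mirrors that CPUID bit. As a hypothetical userspace illustration (not part of the patch), the presence check looks like this:

#include <cpuid.h>
#include <stdio.h>

/*
 * Sketch only, not part of the patch: probe CPUID.(EAX=7,ECX=0):EDX
 * bit 29, which enumerates support for the IA32_ARCH_CAPABILITIES MSR.
 */
int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                return 1;       /* leaf 7 not supported at all */
        printf("IA32_ARCH_CAPABILITIES MSR %s\n",
               (edx & (1u << 29)) ? "present" : "absent");
        return 0;
}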
 
        VMENTER_L1D_FLUSH_COND,
        VMENTER_L1D_FLUSH_ALWAYS,
        VMENTER_L1D_FLUSH_EPT_DISABLED,
+       VMENTER_L1D_FLUSH_NOT_REQUIRED,
 };
 
 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
 
        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
+       [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary",
 };
 
 static ssize_t l1tf_show_state(char *buf)
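When the new state is active, the "flush not necessary" string added above is what l1tf_show_state() formats into /sys/devices/system/cpu/vulnerabilities/l1tf, so on such hardware the file should read something like "Mitigation: PTE Inversion; VMX: flush not necessary, SMT vulnerable" (the SMT suffix depends on whether SMT is enabled).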
 
                return 0;
        }
 
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+               u64 msr;
+
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+               if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+                       l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+                       return 0;
+               }
+       }
+
        /* If set to auto use the default l1tf mitigation method */
        if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
                switch (l1tf_mitigation) {
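
The same decision can be observed from userspace by reading the MSR through the msr driver (modprobe msr, root required) and testing bit 3, assuming the CPUID presence check sketched earlier has passed. A minimal sketch, not part of the patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1ULL << 3)

int main(void)
{
        uint64_t msr;
        /* The msr driver exposes each CPU's MSRs as a file seekable
         * by MSR index, so pread() at offset 0x10a reads this MSR. */
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0 ||
            pread(fd, &msr, sizeof(msr), MSR_IA32_ARCH_CAPABILITIES) != sizeof(msr)) {
                perror("rdmsr");
                return 1;
        }
        printf("L1D flush on VMENTER: %s\n",
               (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) ? "not required" : "required");
        close(fd);
        return 0;
}

Compiled with cc and run as root (file name and build invocation are up to the reader), this prints whether the CPU promises that the L1D flush on VMENTER is unnecessary, which is exactly the condition under which the kernel code above selects VMENTER_L1D_FLUSH_NOT_REQUIRED.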