This is a backport of the following patch:
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Thu, 30 Nov 2017 15:00:10 +0100
Subject: [RHEL7.5 PATCH 05/35] x86/kvm: Pad RSB on VM transition
Add code to pad the local CPU's RSB entries on VM exit, to protect
against stale return-stack-buffer predictions left behind by a
previous, less-privileged mode.
Orabug: 27344012
CVE: CVE-2017-5715
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: John Haxby <john.haxby@oracle.com>
Signed-off-by: Kirtikar Kashyap <kirtikar.kashyap@oracle.com>
#define ASYNC_PF_PER_VCPU 64
+/*
+ * Fill ("stuff") the CPU's Return Stack Buffer with 32 benign entries
+ * (Spectre v2 / CVE-2017-5715 mitigation, per the patch subject: pad the
+ * RSB on VM transition so return predictions seeded by a less-privileged
+ * guest cannot be consumed by the host).
+ *
+ * Each "call Nf" pushes a return address on the stack AND pushes an entry
+ * into the RSB; the "pause" after every call sits at the would-be return
+ * target, so a speculative return that consumes one of these RSB entries
+ * lands on a harmless pause.  NOTE(review): pause as a speculation trap is
+ * the apparent intent here; upstream variants use lfence/jmp loops instead
+ * — confirm this matches the vendor-recommended sequence for the target CPUs.
+ *
+ * The final "add $(32*8), %%rsp" discards the 32 8-byte return addresses
+ * the calls pushed, restoring the stack pointer.  The %%rsp reference and
+ * 8-byte slot size make this 64-bit only.  The "memory" clobber keeps the
+ * compiler from caching memory values across the stack manipulation.
+ */
+static inline void stuff_RSB(void)
+{
+ __asm__ __volatile__(" call 1f; pause;"
+ "1: call 2f; pause;"
+ "2: call 3f; pause;"
+ "3: call 4f; pause;"
+ "4: call 5f; pause;"
+ "5: call 6f; pause;"
+ "6: call 7f; pause;"
+ "7: call 8f; pause;"
+ "8: call 9f; pause;"
+ "9: call 10f; pause;"
+ "10: call 11f; pause;"
+ "11: call 12f; pause;"
+ "12: call 13f; pause;"
+ "13: call 14f; pause;"
+ "14: call 15f; pause;"
+ "15: call 16f; pause;"
+ "16: call 17f; pause;"
+ "17: call 18f; pause;"
+ "18: call 19f; pause;"
+ "19: call 20f; pause;"
+ "20: call 21f; pause;"
+ "21: call 22f; pause;"
+ "22: call 23f; pause;"
+ "23: call 24f; pause;"
+ "24: call 25f; pause;"
+ "25: call 26f; pause;"
+ "26: call 27f; pause;"
+ "27: call 28f; pause;"
+ "28: call 29f; pause;"
+ "29: call 30f; pause;"
+ "30: call 31f; pause;"
+ "31: call 32f; pause;"
+ "32: add $(32*8), %%rsp": : :"memory"); /* pop the 32 pushed return addresses */
+}
+
enum kvm_reg {
VCPU_REGS_RAX = 0,
VCPU_REGS_RCX = 1,
#endif
);
+ stuff_RSB();
+
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
#endif
);
+ stuff_RSB();
+
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);