x86/kvm: Pad RSB on VM transition
author Tim Chen <tim.c.chen@linux.intel.com>
Wed, 20 Dec 2017 16:04:47 +0000 (08:04 -0800)
committer Kirtikar Kashyap <kirtikar.kashyap@oracle.com>
Fri, 12 Jan 2018 18:19:57 +0000 (10:19 -0800)
This is a patch from:

From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Thu, 30 Nov 2017 15:00:10 +0100
Subject: [RHEL7.5 PATCH 05/35] x86/kvm: Pad RSB on VM transition

Add code to pad the local CPU's RSB entries, protecting the host from
return-address predictions planted by a previous, less privileged mode.
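
Background (commentary, not part of the original message): each call
instruction pushes its return address onto both the software stack and
the CPU's Return Stack Buffer (RSB), the predictor consulted when
speculating ret targets. Stuffing the RSB with the addresses of benign
pause instructions overwrites anything a guest may have planted there.
A minimal sketch of the building block, under a hypothetical name (the
patch below stuffs 32 entries):

static inline void stuff_rsb_two(void)
{
	/* Illustrative sketch only, not code from this patch. */
	__asm__ __volatile__("      call 1f; pause;"    /* RSB entry 1; pause is the speculation trap */
			     "1:    call 2f; pause;"    /* RSB entry 2 */
			     "2:    add $(2*8), %%rsp"  /* discard the two return addresses */
			     : : : "memory");
}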

Orabug: 27344012
CVE: CVE-2017-5715

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: John Haxby <john.haxby@oracle.com>
Signed-off-by: Kirtikar Kashyap <kirtikar.kashyap@oracle.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 868bcad618324ef54ab1cedfc69519587c5e487f..093fc1fb17f6ae755ba753be9e5b7af66f908d82 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -93,6 +93,43 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 
 #define ASYNC_PF_PER_VCPU 64
 
+static inline void stuff_RSB(void)
+{
+       __asm__ __volatile__("       call 1f; pause;"
+                            "1:     call 2f; pause;"
+                            "2:     call 3f; pause;"
+                            "3:     call 4f; pause;"
+                            "4:     call 5f; pause;"
+                            "5:     call 6f; pause;"
+                            "6:     call 7f; pause;"
+                            "7:     call 8f; pause;"
+                            "8:     call 9f; pause;"
+                            "9:     call 10f; pause;"
+                            "10:    call 11f; pause;"
+                            "11:    call 12f; pause;"
+                            "12:    call 13f; pause;"
+                            "13:    call 14f; pause;"
+                            "14:    call 15f; pause;"
+                            "15:    call 16f; pause;"
+                            "16:    call 17f; pause;"
+                            "17:    call 18f; pause;"
+                            "18:    call 19f; pause;"
+                            "19:    call 20f; pause;"
+                            "20:    call 21f; pause;"
+                            "21:    call 22f; pause;"
+                            "22:    call 23f; pause;"
+                            "23:    call 24f; pause;"
+                            "24:    call 25f; pause;"
+                            "25:    call 26f; pause;"
+                            "26:    call 27f; pause;"
+                            "27:    call 28f; pause;"
+                            "28:    call 29f; pause;"
+                            "29:    call 30f; pause;"
+                            "30:    call 31f; pause;"
+                            "31:    call 32f; pause;"
+                            "32:    add $(32*8), %%rsp": : :"memory");
+}
+
 enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
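
A note on the helper above (commentary, not patch content): the 32
unrolled call/pause pairs are enough to overwrite every RSB entry on
affected CPUs, and the final "add $(32*8), %%rsp" drops the 32 return
addresses the calls left on the stack (8 bytes each on x86-64). A more
compact, behaviorally equivalent encoding could use the assembler's
.rept directive; the following is a sketch under the same 64-bit
assumption, not code from this patch:

static inline void stuff_RSB_rept(void)
{
	/* Hypothetical alternative to the unrolled stuff_RSB() above. */
	__asm__ __volatile__(".rept 32\n\t"
			     "call 1f\n\t"          /* plant one benign RSB entry */
			     "pause\n\t"            /* speculation trap if that entry is consumed */
			     "1:\n\t"
			     ".endr\n\t"
			     "add $(32*8), %%rsp"   /* pop the 32 return addresses */
			     : : : "memory");
}

Upstream Linux later settled on a looped fill for the same job (the
__FILL_RETURN_BUFFER macro in nospec-branch.h).
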
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7c7aebddbbef0a8b023d19302f55a3cd4c53eb4d..0e9d78928966e2974e361940afeb017b0ad62503 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3983,6 +3983,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
                );
 
+       stuff_RSB();
+
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 68c6ad376acb564a7702d21ccd0a7ce831221ed7..9f21e6b16a389a9fde046dddeee1d6d39685f3c5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8296,6 +8296,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
              );
 
+       stuff_RSB();
+
        /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
        if (debugctlmsr)
                update_debugctlmsr(debugctlmsr);
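
Placement note: in both svm_vcpu_run() and vmx_vcpu_run() the
stuff_RSB() call sits immediately after the inline-asm VM-entry/exit
block, so guest-planted RSB entries are overwritten before the host
executes any ret that speculation could misdirect.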