From: Konrad Rzeszutek Wilk
Date: Thu, 21 Jun 2018 02:01:22 +0000 (-0400)
Subject: x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
X-Git-Tag: v4.1.12-124.31.3~632
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=d50de3cf24e237967ed11c199db7fa13f495d6dd;p=users%2Fjedix%2Flinux-maple.git

x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs

The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
add_atomic_switch_msr() with an entry_only parameter to allow storing
the MSR only in the guest (ENTRY) MSR array.

Signed-off-by: Konrad Rzeszutek Wilk
Signed-off-by: Thomas Gleixner
Orabug: 28220674
CVE: CVE-2018-3646

(cherry picked from commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8)
Signed-off-by: Mihai Carabas
Reviewed-by: Darren Kenny
Reviewed-by: Boris Ostrovsky
---

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ef266021a7c1..01955fd5417a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1728,9 +1728,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
-				  u64 guest_val, u64 host_val)
+				  u64 guest_val, u64 host_val, bool entry_only)
 {
-	int i, j;
+	int i, j = 0;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
 	switch (msr) {
@@ -1766,7 +1766,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	}
 
 	i = find_msr(&m->guest, msr);
-	j = find_msr(&m->host, msr);
+	if (!entry_only)
+		j = find_msr(&m->host, msr);
+
 	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
@@ -1776,12 +1778,16 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		i = m->guest.nr++;
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
 	}
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+
+	if (entry_only)
+		return;
+
 	if (j < 0) {
 		j = m->host.nr++;
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
-	m->guest.val[i].index = msr;
-	m->guest.val[i].value = guest_val;
 	m->host.val[j].index = msr;
 	m->host.val[j].value = host_val;
 }
@@ -1840,7 +1846,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
-					      guest_efer, host_efer);
+					      guest_efer, host_efer, false);
 		return false;
 	} else {
 		guest_efer &= ~ignore_bits;
@@ -2891,7 +2897,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu->arch.ia32_xss = data;
 		if (vcpu->arch.ia32_xss != host_xss)
 			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-				vcpu->arch.ia32_xss, host_xss);
+				vcpu->arch.ia32_xss, host_xss, false);
 		else
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
@@ -8349,7 +8355,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 			clear_atomic_switch_msr(vmx, msrs[i].msr);
 		else
 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
-					msrs[i].host);
+					msrs[i].host, false);
 }
 
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
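
Note: this patch only adds the entry_only plumbing; every existing caller passes false, so no behaviour changes here. A minimal sketch of the intended ENTRY-only usage, assuming the MSR_IA32_FLUSH_CMD caller introduced later in the L1TF series (illustrative, not part of this diff):

	/*
	 * Illustrative sketch: arm the L1D flush command MSR so it is
	 * loaded on every VMENTER, with no matching VMEXIT (host) entry.
	 * The actual caller lands in a follow-up patch of this series.
	 */
	add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);

Because entry_only skips the host-side lookup and store, such an MSR consumes a slot only in the VM_ENTRY_MSR_LOAD list, not in the VM_EXIT_MSR_LOAD list.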