x86/KVM/VMX: Split the VMX MSR LOAD structures to have separate host/guest numbers
Author:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
AuthorDate: Wed, 20 Jun 2018 17:58:37 +0000 (13:58 -0400)
Commit:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CommitDate: Sat, 11 Aug 2018 00:44:39 +0000 (20:44 -0400)
There is no semantic change, but this allows an unbalanced number of
MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to
restore on VMEXIT may differ from the number to load on VMENTER.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Orabug: 28220674
CVE: CVE-2018-3646

(cherry picked from commit 33966dd6b2d2c352fae55412db2ea8cfff5df13a)

Signed-off-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Conflicts:
arch/x86/kvm/vmx.c
Contextual: the surrounding code differs; in this tree,
prepare_vmcs02_full() does not exist and nested_vmx_vmexit() lacks the
lines modified by the original patch.

arch/x86/kvm/vmx.c

index 6e21980f559e04d8af97a975268341705fff9b36..cb73b0b77e61be59870a05d38704d606f7b77a9d 100644
@@ -527,6 +527,11 @@ static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
        return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
 }
 
+struct vmx_msrs {
+       unsigned int            nr;
+       struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        unsigned long         host_rsp;
@@ -556,9 +561,8 @@ struct vcpu_vmx {
        struct loaded_vmcs   *loaded_vmcs;
        bool                  __launched; /* temporary, used in vmx_vcpu_run */
        struct msr_autoload {
-               unsigned nr;
-               struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-               struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+               struct vmx_msrs guest;
+               struct vmx_msrs host;
        } msr_autoload;
        struct {
                int           loaded;
@@ -1684,18 +1688,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
                }
                break;
        }
-
-       for (i = 0; i < m->nr; ++i)
-               if (m->guest[i].index == msr)
+       for (i = 0; i < m->guest.nr; ++i)
+               if (m->guest.val[i].index == msr)
                        break;
 
-       if (i == m->nr)
+       if (i == m->guest.nr)
                return;
-       --m->nr;
-       m->guest[i] = m->guest[m->nr];
-       m->host[i] = m->host[m->nr];
-       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+       --m->guest.nr;
+       --m->host.nr;
+       m->guest.val[i] = m->guest.val[m->guest.nr];
+       m->host.val[i] = m->host.val[m->host.nr];
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -1747,24 +1751,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
        }
 
-       for (i = 0; i < m->nr; ++i)
-               if (m->guest[i].index == msr)
+       for (i = 0; i < m->guest.nr; ++i)
+               if (m->guest.val[i].index == msr)
                        break;
 
        if (i == NR_AUTOLOAD_MSRS) {
                printk_once(KERN_WARNING "Not enough msr switch entries. "
                                "Can't add msr %x\n", msr);
                return;
-       } else if (i == m->nr) {
-               ++m->nr;
-               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+       } else if (i == m->guest.nr) {
+               ++m->guest.nr;
+               ++m->host.nr;
+               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
        }
 
-       m->guest[i].index = msr;
-       m->guest[i].value = guest_val;
-       m->host[i].index = msr;
-       m->host[i].value = host_val;
+       m->guest.val[i].index = msr;
+       m->guest.val[i].value = guest_val;
+       m->host.val[i].index = msr;
+       m->host.val[i].value = host_val;
 }
 
 static void reload_tss(void)
@@ -4779,9 +4784,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
        if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                u32 msr_low, msr_high;