Revert "KVM: VMX: introduce alloc_loaded_vmcs"
author Boris Ostrovsky <boris.ostrovsky@oracle.com>
	Tue, 26 Mar 2019 22:46:03 +0000 (18:46 -0400)
committer Brian Maly <brian.maly@oracle.com>
	Wed, 27 Mar 2019 18:51:50 +0000 (14:51 -0400)
This reverts commit 8dd66ca98dfc03f7921ce5bf5926ce2d95507d84.

Revert due to performance regression.

Orabug: 29542029

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
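
The patch being reverted had folded the two-step allocate-then-initialize sequence into a single alloc_loaded_vmcs() helper; the revert returns that sequence to each call site (handle_vmon() and vmx_create_vcpu()), as the hunks below show. A minimal user-space sketch of both patterns, with stub types and calloc() standing in for the real per-CPU page allocator (illustrative assumptions only, not kernel code):

	/* Sketch of the two allocation styles involved in this revert. */
	#include <stdio.h>
	#include <stdlib.h>

	struct vmcs { int dummy; };
	struct loaded_vmcs { struct vmcs *vmcs; };

	static struct vmcs *alloc_vmcs(void)
	{
		return calloc(1, sizeof(struct vmcs)); /* stand-in for alloc_vmcs_cpu() */
	}

	static void loaded_vmcs_init(struct loaded_vmcs *lv)
	{
		(void)lv; /* in the kernel this clears the VMCS; a no-op here */
	}

	/* Helper removed by the revert: allocation and init in one call. */
	static int alloc_loaded_vmcs(struct loaded_vmcs *lv)
	{
		lv->vmcs = alloc_vmcs();
		if (!lv->vmcs)
			return -1; /* -ENOMEM in the kernel */
		loaded_vmcs_init(lv);
		return 0;
	}

	int main(void)
	{
		struct loaded_vmcs vmcs02 = { 0 };
		struct loaded_vmcs vmcs01 = { 0 };

		/* Pattern the revert restores: open-coded at the call site. */
		vmcs02.vmcs = alloc_vmcs();
		if (!vmcs02.vmcs)
			return 1;
		loaded_vmcs_init(&vmcs02);

		/* Pattern being reverted: the bundled helper. */
		if (alloc_loaded_vmcs(&vmcs01))
			return 1;

		printf("both allocation styles succeeded\n");
		free(vmcs02.vmcs);
		free(vmcs01.vmcs);
		return 0;
	}

In vmx_create_vcpu() the revert additionally restores the conditional kvm_cpu_vmxon()/kvm_cpu_vmxoff() bracketing around loaded_vmcs_init() for the !vmm_exclusive case, visible in the last hunk below.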
arch/x86/kvm/vmx.c

index 4b2c767216d77a799974fb848bd700998c74040c..633be63fd08d57a2e81475ffae2f91a0659f4f8b 100644
@@ -3391,6 +3391,11 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
        return vmcs;
 }
 
+static struct vmcs *alloc_vmcs(void)
+{
+       return alloc_vmcs_cpu(raw_smp_processor_id());
+}
+
 static void free_vmcs(struct vmcs *vmcs)
 {
        free_pages((unsigned long)vmcs, vmcs_config.order);
@@ -3414,21 +3419,6 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
                wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
 }
 
-static struct vmcs *alloc_vmcs(void)
-{
-       return alloc_vmcs_cpu(raw_smp_processor_id());
-}
-
-static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
-{
-       loaded_vmcs->vmcs = alloc_vmcs();
-       if (!loaded_vmcs->vmcs)
-               return -ENOMEM;
-
-       loaded_vmcs_init(loaded_vmcs);
-       return 0;
-}
-
 static void free_kvm_area(void)
 {
        int cpu;
@@ -6751,7 +6741,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        struct kvm_segment cs;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs *shadow_vmcs;
-       int r;
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
@@ -6793,9 +6782,10 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
-       if (r < 0)
+       vmx->nested.vmcs02.vmcs = alloc_vmcs();
+       if (!vmx->nested.vmcs02.vmcs)
                goto out_vmcs02;
+       loaded_vmcs_init(&vmx->nested.vmcs02);
 
        if (cpu_has_vmx_msr_bitmap()) {
                vmx->nested.msr_bitmap =
@@ -8710,10 +8700,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                goto uninit_vcpu;
        }
 
-       err = alloc_loaded_vmcs(&vmx->vmcs01);
-       if (err < 0)
-               goto free_msrs;
        vmx->loaded_vmcs = &vmx->vmcs01;
+       vmx->loaded_vmcs->vmcs = alloc_vmcs();
+       if (!vmx->loaded_vmcs->vmcs)
+               goto free_msrs;
+       if (!vmm_exclusive)
+               kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
+       loaded_vmcs_init(vmx->loaded_vmcs);
+       if (!vmm_exclusive)
+               kvm_cpu_vmxoff();
 
        cpu = get_cpu();
        vmx_vcpu_load(&vmx->vcpu, cpu);