KVM: x86: SVM: call KVM_REQ_GET_NESTED_STATE_PAGES on exit from SMM mode
author    Maxim Levitsky <mlevitsk@redhat.com>  Mon, 13 Sep 2021 14:09:51 +0000 (17:09 +0300)
committer Paolo Bonzini <pbonzini@redhat.com>  Wed, 22 Sep 2021 14:33:17 +0000 (10:33 -0400)
Currently the KVM_REQ_GET_NESTED_STATE_PAGES handler on SVM only reloads
the PDPTRs and the MSR bitmap. The former is not really needed for SMM,
as the SMM exit code reloads them again from SMRAM's CR3, and the latter
happens to work only because the MSR bitmap isn't modified while in SMM.

Still, it is better to be consistent with VMX.
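
For context, the request raised here is not serviced in the SMM-exit path
itself; it is consumed from the vcpu run loop right before the next guest
entry, via the .get_nested_state_pages hook. The snippet below is a
simplified sketch of that consumer, assembled only from the helpers visible
in this patch; it is not the verbatim in-tree svm_get_nested_state_pages():

/*
 * Sketch of the KVM_REQ_GET_NESTED_STATE_PAGES consumer (simplified,
 * not the exact kernel code).  It runs before the next guest entry,
 * when guest memory can safely be touched again.
 */
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Redo the CR3 load that enter_svm_guest_mode() performed with
	 * from_vmrun == false, this time reloading the PDPTRs as well
	 * (only meaningful without nested NPT).
	 */
	if (!nested_npt_enabled(svm) &&
	    nested_svm_load_cr3(vcpu, vcpu->arch.cr3, false, true))
		return false;

	/* Re-merge L1's MSR permission bitmap into the active bitmap. */
	if (!nested_svm_vmrun_msrpm(svm))
		return false;

	return true;
}

Raising the request from enter_svm_guest_mode() rather than open-coding the
reloads in svm_leave_smm() mirrors what VMX already does when it re-enters
guest mode on RSM.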

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-5-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 2545d0c61985bd47b1d79384bfc4133cc3ce3649..b41a21cac5444131173b9adb7b357001c06dc3ed 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -579,7 +579,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
-                        struct vmcb *vmcb12)
+                        struct vmcb *vmcb12, bool from_vmrun)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
@@ -609,13 +609,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
        nested_vmcb02_prepare_save(svm, vmcb12);
 
        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
-                                 nested_npt_enabled(svm), true);
+                                 nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;
 
        if (!npt_enabled)
                vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 
+       if (!from_vmrun)
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
        svm_set_gif(svm, true);
 
        return 0;
@@ -681,7 +684,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
        svm->nested.nested_run_pending = 1;
 
-       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
                goto out_exit_err;
 
        if (nested_svm_vmrun_msrpm(svm))
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 35cac2046f69af4ecd76454f015b523dae901500..ffdde862a5f62281750c0121d52c69c6e4f79cc1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4369,7 +4369,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                         */
                        vmcb12 = map.hva;
                        nested_load_control_from_vmcb12(svm, &vmcb12->control);
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
+                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
                        kvm_vcpu_unmap(vcpu, &map_save, true);
                }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 524d943f3efc6a60b6b97a24d243094b694dc397..128a54b1fbf141214a2b47acd42d68babfa36d65 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+                        u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
 void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);