        if (!cpu_has_vmx_unrestricted_guest())
                enable_unrestricted_guest = 0;
 
-       if (!cpu_has_vmx_flexpriority())
+       if (!cpu_has_vmx_flexpriority()) {
                flexpriority_enabled = 0;
 
+               /*
+                * set_apic_access_page_addr() is used to reload apic access
+                * page upon invalidation.  No need to do anything if the
+                * processor does not have the APIC_ACCESS_ADDR VMCS field.
+                */
+               kvm_x86_ops->set_apic_access_page_addr = NULL;
+       }
+
        if (!cpu_has_vmx_tpr_shadow())
                kvm_x86_ops->update_cr8_intercept = NULL;
 
                vmcs_write32(TPR_THRESHOLD, 0);
        }
 
-       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
-               vmcs_write64(APIC_ACCESS_ADDR,
-                            page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+       kvm_vcpu_reload_apic_access_page(vcpu);
 
        if (vmx_vm_has_apicv(vcpu->kvm))
                memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
        vmx_set_msr_bitmap(vcpu);
 }
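
For context, the kvm_vcpu_reload_apic_access_page() calls introduced above go
to a common helper on the x86 side of this series.  A minimal sketch of what
that helper is assumed to do (illustrative only; the real body belongs in
arch/x86/kvm/x86.c, not in this file):

void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
        struct page *page;

        /*
         * Nothing to do if the vendor module cleared the hook, e.g. VMX
         * without flexpriority (see the hardware_setup() hunk above).
         */
        if (!kvm_x86_ops->set_apic_access_page_addr)
                return;

        /* Look up the current hpa of the APIC access page. */
        page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
        kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

        /*
         * Do not pin the page; the MMU notifier will trigger another
         * reload if it is migrated or swapped out.
         */
        put_page(page);
}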
 
+static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       /*
+        * Currently we do not handle the nested case where L2 has an
+        * APIC access page of its own; that page is still pinned.
+        * Hence, we skip the case where the VCPU is in guest mode _and_
+        * L1 prepared an APIC access page for L2.
+        *
+        * For the case where L1 and L2 share the same APIC access page
+        * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
+        * in the vmcs12), this function will only update either the vmcs01
+        * or the vmcs02.  If the former, the vmcs02 will be updated by
+        * prepare_vmcs02.  If the latter, the vmcs01 will be updated in
+        * the next L2->L1 exit.
+        */
+       if (!is_guest_mode(vcpu) ||
+           !nested_cpu_has2(vmx->nested.current_vmcs12,
+                            SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+               vmcs_write64(APIC_ACCESS_ADDR, hpa);
+}
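
The reload path itself is driven from the MMU notifier on the common side.
A rough sketch of the arch hook this series relies on (the names
kvm_arch_mmu_notifier_invalidate_page() and KVM_REQ_APIC_PAGE_RELOAD are
assumed from the non-VMX patches of the series, not from this hunk):

void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                           unsigned long address)
{
        /*
         * The physical address of the APIC access page is stored in the
         * VMCS.  When that page is invalidated, ask every vcpu to reload
         * it before the next guest entry.
         */
        if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}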
+
 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
 {
        u16 status;
                } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
                        exec_control |=
                                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-                       vmcs_write64(APIC_ACCESS_ADDR,
-                               page_to_phys(vcpu->kvm->arch.apic_access_page));
+                       kvm_vcpu_reload_apic_access_page(vcpu);
                }
 
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
                vmx->nested.virtual_apic_page = NULL;
        }
 
+       /*
+        * While L2 was running, an mmu_notifier invalidation would only
+        * have reloaded the page's hpa into the L2 vmcs, so vmcs01 may be
+        * stale.  Reload it for L1 before entering L1.
+        */
+       kvm_vcpu_reload_apic_access_page(vcpu);
+
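
(The request raised by the notifier is serviced on the next guest entry,
roughly as sketched below; this snippet is assumed from the common-code side
of the series.  While the vcpu is in guest mode that path only updates the
current L2 vmcs, which is why the explicit call above is needed for vmcs01.)

        if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
                kvm_vcpu_reload_apic_access_page(vcpu);
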
        /*
         * Exiting from L2 to L1, we're now back to L1 which thinks it just
         * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
        .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+       .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
        .vm_has_apicv = vmx_vm_has_apicv,
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
        .hwapic_irr_update = vmx_hwapic_irr_update,