                        return false;
 
                swap(mmu->root_hpa, mmu->prev_root.hpa);
-               mmu->prev_root.cr3 = kvm_read_cr3(vcpu);
+               mmu->prev_root.cr3 = mmu->get_cr3(vcpu);
 
                if (new_cr3 == prev_cr3 &&
                    VALID_PAGE(mmu->root_hpa) &&
 {
        __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu));
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr3(vcpu);
 }
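
The hunk above changes how the outgoing root's CR3 is recorded: it is now read
through mmu->get_cr3() instead of kvm_read_cr3(). That matters once this fast
switch path also serves the shadow-EPT MMU, whose get_cr3 callback is
nested_ept_get_cr3() (installed in the vmx.c hunk further down) and returns the
L1 EPTP rather than the guest's CR3 register. Below is a minimal user-space
sketch of the cached-previous-root idea only; the names (toy_mmu,
toy_fast_switch, ...) are invented for illustration and are not the KVM types.

#include <stdbool.h>
#include <stdint.h>

struct toy_root {
        uint64_t hpa;   /* address of a cached root page table */
        uint64_t cr3;   /* CR3 (or EPTP) value that root was built for */
};

struct toy_mmu {
        uint64_t root_hpa;      /* currently active root */
        uint64_t cur_cr3;       /* value the active root was built for */
        struct toy_root prev_root;
};

/*
 * Try to switch to new_cr3 without rebuilding page tables: stash the
 * active root in prev_root and reuse the previously cached root if it
 * was built for exactly this CR3/EPTP value.  On false, the caller
 * falls back to a full root reload.
 */
static bool toy_fast_switch(struct toy_mmu *mmu, uint64_t new_cr3)
{
        uint64_t prev_cr3 = mmu->prev_root.cr3;
        uint64_t tmp = mmu->root_hpa;

        /* swap the active root with the cached previous root */
        mmu->root_hpa = mmu->prev_root.hpa;
        mmu->prev_root.hpa = tmp;
        mmu->prev_root.cr3 = mmu->cur_cr3;

        if (new_cr3 == prev_cr3 && mmu->root_hpa != 0) {
                mmu->cur_cr3 = new_cr3;
                return true;    /* cached root matches, no rebuild needed */
        }
        return false;
}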
 
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
-                            bool accessed_dirty)
+                            bool accessed_dirty, gpa_t new_eptp)
 {
        struct kvm_mmu *context = &vcpu->arch.mmu;
        union kvm_mmu_page_role root_page_role =
                kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
 
+       __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role);
        context->shadow_root_level = PT64_ROOT_4LEVEL;
 
        context->nx = true;
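
kvm_init_shadow_ept_mmu() now receives the L1 EPTP and hands it to
__kvm_mmu_new_cr3() together with the shadow-EPT root page role, so a cached
root is only reused when it was built under the same role (execute-only
support, accessed/dirty behaviour, and so on). A toy sketch of role-keyed
matching follows; the role layout is invented and much smaller than the real
union kvm_mmu_page_role.

#include <stdbool.h>
#include <stdint.h>

/* Invented role layout for illustration; not the real kvm_mmu_page_role. */
union toy_role {
        uint32_t word;
        struct {
                uint32_t level        : 4;
                uint32_t ept_execonly : 1;
                uint32_t ad_disabled  : 1;
        };
};

/*
 * A cached root is only a valid substitute when it maps the same EPTP
 * *and* was built under an identical role word.
 */
static bool toy_root_usable(uint64_t cached_eptp, union toy_role cached_role,
                            uint64_t new_eptp, union toy_role new_role)
{
        return cached_eptp == new_eptp && cached_role.word == new_role.word;
}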
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
-                            bool accessed_dirty);
+                            bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
        if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
                return 1;
 
-       kvm_mmu_unload(vcpu);
        kvm_init_shadow_ept_mmu(vcpu,
                        to_vmx(vcpu)->nested.msrs.ept_caps &
                        VMX_EPT_EXECUTE_ONLY_BIT,
-                       nested_ept_ad_enabled(vcpu));
+                       nested_ept_ad_enabled(vcpu),
+                       nested_ept_get_cr3(vcpu));
        vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
        vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
        vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
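
With the EPTP passed straight into kvm_init_shadow_ept_mmu(),
nested_ept_init_mmu_context() no longer needs the up-front kvm_mmu_unload():
instead of discarding the active root on every switch into the shadow-EPT
context, the MMU can stash it and pick up a cached root when the EPTP and role
still match. A rough before/after sketch, reusing the invented
toy_mmu/toy_fast_switch names from the earlier sketch (not KVM code):

/* Placeholder helpers, reduced to trivial bodies for the sketch. */
static void toy_free_root(struct toy_mmu *mmu)
{
        mmu->root_hpa = 0;      /* release the root page tables (elided) */
}

static void toy_setup_shadow_ept(struct toy_mmu *mmu, uint64_t l1_eptp)
{
        (void)mmu;              /* install EPT paging callbacks (elided) */
        (void)l1_eptp;
}

/* Old shape: always throw the active root away, forcing a full rebuild. */
static void toy_enter_l2_old(struct toy_mmu *mmu, uint64_t l1_eptp)
{
        toy_free_root(mmu);                     /* kvm_mmu_unload() analogue */
        toy_setup_shadow_ept(mmu, l1_eptp);
}

/* New shape: try a root switch first; rebuild only when nothing fits. */
static void toy_enter_l2_new(struct toy_mmu *mmu, uint64_t l1_eptp)
{
        if (!toy_fast_switch(mmu, l1_eptp))
                mmu->root_hpa = 0;              /* miss: rebuild lazily */
        toy_setup_shadow_ept(mmu, l1_eptp);
}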
                                return 1;
                        }
                }
-
-               vcpu->arch.cr3 = cr3;
-               __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
        }
 
-       kvm_mmu_reset_context(vcpu);
+       if (!nested_ept)
+               kvm_mmu_new_cr3(vcpu, cr3);
+
+       vcpu->arch.cr3 = cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+       kvm_init_mmu(vcpu, false);
+
        return 0;
 }
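
Two details in this hunk are easy to miss. First, kvm_mmu_new_cr3() is only
called for the !nested_ept case: with nested EPT enabled the shadow roots are
keyed by the EPTP (handled when the shadow-EPT context is set up above), not
by the guest CR3 being loaded here. Second, kvm_mmu_reset_context() is
replaced by kvm_init_mmu(vcpu, false): the paging context is still
re-initialized, but reset_roots == false keeps the cached roots alive rather
than unloading them. Continuing the invented toy names from the sketches
above:

/* Re-initialize the paging context; optionally drop all cached roots. */
static void toy_init_mmu(struct toy_mmu *mmu, bool reset_roots)
{
        if (reset_roots) {
                /* kvm_mmu_reset_context() analogue: everything is rebuilt */
                mmu->root_hpa = 0;
                mmu->prev_root.hpa = 0;
                mmu->prev_root.cr3 = 0;
        }
        /* (re)select the paging-mode callbacks here (elided) */
}

static void toy_load_guest_cr3(struct toy_mmu *mmu, uint64_t cr3,
                               bool nested_ept)
{
        /* Roots are keyed by the EPTP when nested EPT is on, not by CR3. */
        if (!nested_ept)
                toy_fast_switch(mmu, cr3);      /* a miss just rebuilds later */

        toy_init_mmu(mmu, false);               /* keep the cached roots */
}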
 
 
 
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
+       int ret = -EINTR;
+       int idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        if (kvm_arch_vcpu_runnable(vcpu)) {
                kvm_make_request(KVM_REQ_UNHALT, vcpu);
-               return -EINTR;
+               goto out;
        }
        if (kvm_cpu_has_pending_timer(vcpu))
-               return -EINTR;
+               goto out;
        if (signal_pending(current))
-               return -EINTR;
+               goto out;
 
-       return 0;
+       ret = 0;
+out:
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       return ret;
 }
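
The last hunk wraps the whole of kvm_vcpu_check_block() in an SRCU read-side
critical section, presumably so that anything kvm_arch_vcpu_runnable()
dereferences under kvm->srcu (memslots, for instance) stays valid for the
duration of the check. The general reader pattern is the
srcu_read_lock()/srcu_read_unlock() pair; the fragment below is a generic
kernel-module-style sketch of that pattern with an invented SRCU domain, not
the KVM code, and it only builds inside a kernel tree.

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);            /* invented SRCU domain */

static int my_read_side(void)
{
        int idx, ret;

        /*
         * Data published under my_srcu may be dereferenced between
         * srcu_read_lock() and srcu_read_unlock(); writers wait for
         * readers via synchronize_srcu() before freeing it.
         */
        idx = srcu_read_lock(&my_srcu);
        ret = 0;                        /* read SRCU-protected data here */
        srcu_read_unlock(&my_srcu, idx);

        return ret;
}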
 
 /*