@@ ... @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
        return &slot->arch.lpage_info[level - 2][idx];
 }
 
-static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;
+       gfn_t gfn;
        int i;
 
+       gfn = sp->gfn;
        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
@@ ... @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
        kvm->arch.indirect_shadow_pages++;
 }
 
-static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;
+       gfn_t gfn;
        int i;
 
+       gfn = sp->gfn;
        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
@@ ... @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        kvm_sync_pages(vcpu, gfn);
 
-               account_shadowed(vcpu->kvm, gfn);
+               account_shadowed(vcpu->kvm, sp);
        }
        sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        init_shadow_page_table(sp);
@@ ... @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
        kvm_mmu_unlink_parents(kvm, sp);
 
        if (!sp->role.invalid && !sp->role.direct)
-               unaccount_shadowed(kvm, sp->gfn);
+               unaccount_shadowed(kvm, sp);
 
        if (sp->unsync)
                kvm_unlink_unsync_page(kvm, sp);
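
For reference, here is roughly how the two helpers read with the hunks above applied. The for-loop bodies fall outside the diff context, so the linfo->write_count updates shown below are an assumption based on the surrounding mmu.c code of this era, not lines this patch touches:

/*
 * Sketch of the post-patch helpers. The loop bodies are assumed
 * from context; only the signatures and the gfn derivation are
 * actually changed by the hunks above.
 */
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	gfn_t gfn;
	int i;

	gfn = sp->gfn;
	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count += 1;	/* assumed loop body */
	}
	kvm->arch.indirect_shadow_pages++;
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	gfn_t gfn;
	int i;

	gfn = sp->gfn;
	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count -= 1;	/* assumed loop body */
		WARN_ON(linfo->write_count < 0);
	}
	kvm->arch.indirect_shadow_pages--;
}

Deriving gfn from sp inside the helpers keeps the call sites down to account_shadowed(vcpu->kvm, sp) and unaccount_shadowed(kvm, sp), and gives the helpers access to the rest of the shadow page (for instance sp->role) without further signature churn.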