        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
        ASSERT(is_empty_shadow_page(sp->spt));
        hlist_del(&sp->hash_link);
        if (!sp->role.direct)
                free_page((unsigned long)sp->gfns);
        kmem_cache_free(mmu_page_header_cache, sp);
-       kvm_mod_used_mmu_pages(kvm, -1);
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
                /* Count self */
                ret++;
                list_move(&sp->link, invalid_list);
+               kvm_mod_used_mmu_pages(kvm, -1);
        } else {
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
        do {
                sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
                WARN_ON(!sp->role.invalid || sp->root_count);
-               kvm_mmu_free_page(kvm, sp);
+               kvm_mmu_free_page(sp);
        } while (!list_empty(invalid_list));
 
 }
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
                }
+               kvm_mmu_commit_zap_page(kvm, &invalid_list);
                goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
        }
 
                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
-               kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
+       kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,