                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
 {
-       struct kvm_mmu_page *sp;
-       LIST_HEAD(invalid_list);
-       unsigned long i;
-       bool flush;
-       gfn_t gfn;
-
-       spin_lock(&kvm->mmu_lock);
-
-       if (list_empty(&kvm->arch.active_mmu_pages))
-               goto out_unlock;
-
-       flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-       for (i = 0; i < slot->npages; i++) {
-               gfn = slot->base_gfn + i;
-
-               for_each_valid_sp(kvm, sp, gfn) {
-                       if (sp->gfn != gfn)
-                               continue;
-
-                       kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-               }
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-                       flush = false;
-                       cond_resched_lock(&kvm->mmu_lock);
-               }
-       }
-       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-       spin_unlock(&kvm->mmu_lock);
+       kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)