 	spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_mmu_zap_all(struct kvm *kvm)
-{
-       struct kvm_mmu_page *sp, *node;
-       LIST_HEAD(invalid_list);
-
-       spin_lock(&kvm->mmu_lock);
-restart:
-       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-               if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-                       goto restart;
-
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
-       spin_unlock(&kvm->mmu_lock);
-}
-
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
 
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
-       kvm_mmu_zap_all(kvm);
+       kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
 {
-       kvm_arch_flush_shadow_all(kvm);
+       kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
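
For reference, kvm_mmu_invalidate_zap_all_pages() is introduced earlier in this
series; the minimal sketch below shows the idea only and is not the exact patch
body (the tracepoint is omitted, and kvm_zap_obsolete_pages() plus the per-VM
mmu_valid_gen field come from the preceding patches). Rather than restarting a
walk over active_mmu_pages and freeing every shadow page while mmu_lock is
held, as the removed kvm_mmu_zap_all() did, it bumps a generation number so all
existing pages become obsolete in O(1) and only then reclaims them:

	void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
	{
		spin_lock(&kvm->mmu_lock);
		/*
		 * Bumping mmu_valid_gen marks every currently active
		 * shadow page obsolete at once; pages created after this
		 * point carry the new generation and are left alone.
		 */
		kvm->arch.mmu_valid_gen++;

		/* Zap only the pages whose generation is now stale. */
		kvm_zap_obsolete_pages(kvm);
		spin_unlock(&kvm->mmu_lock);
	}

This is why both kvm_arch_flush_shadow_all() and
kvm_arch_flush_shadow_memslot() can call it directly: the expensive work is
deferred to the obsolete-page walk instead of being done eagerly at every
flush.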