void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *slot)
 {
-       bool flush;
-
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
                /*
                 * Zap only 4k SPTEs since the legacy MMU only supports dirty
                 * logging at a 4k granularity and never creates collapsible
                 * 2m SPTEs during dirty logging.
                 */
-               flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-               if (flush)
+               if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
                        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
                write_unlock(&kvm->mmu_lock);
        }
 
        if (is_tdp_mmu_enabled(kvm)) {
                read_lock(&kvm->mmu_lock);
-               flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, false);
-               if (flush)
-                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
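+               /*
+                * Note, a TLB flush is not needed here; tdp_mmu_zap_spte_atomic()
+                * flushes remote TLBs as each SPTE is successfully zapped.
+                */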
+               kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
                read_unlock(&kvm->mmu_lock);
        }
 }
 
/*
 * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+                                      const struct kvm_memory_slot *slot)
 {
        gfn_t start = slot->base_gfn;
        gfn_t end = start + slot->npages;
        struct tdp_iter iter;
        kvm_pfn_t pfn;

        rcu_read_lock();

        tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-               if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-                       flush = false;
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
-               }
 
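                /*
                 * Skip SPTEs that cannot be collapsed into a huge page:
                 * non-present or non-leaf entries, reserved pfns, and SPTEs
                 * already mapped at the highest allowed level.
                 */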
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                pfn = spte_to_pfn(iter.old_spte);
                if (kvm_is_reserved_pfn(pfn) ||
                    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
                                                            pfn, PG_LEVEL_NUM))
                        continue;
 
+               /* Note, a successful atomic zap also does a remote TLB flush. */
                if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
                        /*
                         * The iter must explicitly re-read the SPTE because
                         * the atomic cmpxchg failed.
                         */
                        iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                        goto retry;
                }
-               flush = true;
        }
 
        rcu_read_unlock();
-
-       return flush;
 }
 
 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot)
 {
        struct kvm_mmu_page *root;
 
        lockdep_assert_held_read(&kvm->mmu_lock);
 
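        /*
         * The iteration is "yield safe", i.e. tolerates mmu_lock being
         * dropped and reacquired while zapping a root's SPTEs.
         */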
        for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-               flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-       return flush;
+               zap_collapsible_spte_range(kvm, root, slot);
 }
 
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,