}
 
 static __always_inline bool
-slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-                slot_level_handler fn, bool flush_on_yield)
+slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot,
+                    slot_level_handler fn, bool flush_on_yield)
 {
        return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
                                 PG_LEVEL_4K, flush_on_yield);
@@ ... @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
-               flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+               /*
+                * Zap only 4k SPTEs since the legacy MMU only supports dirty
+                * logging at a 4k granularity and never creates collapsible
+                * 2m SPTEs during dirty logging.
+                */
+               flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
                if (flush)
                        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
                write_unlock(&kvm->mmu_lock);
@@ ... @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
-               flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
-                                        false);
+               /*
+                * Clear dirty bits only on 4k SPTEs since the legacy MMU only
+                * supports dirty logging at a 4k granularity.
+                */
+               flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
                write_unlock(&kvm->mmu_lock);
        }