        if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
                                slot->base_gfn + gfn_offset, mask, true);
+
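+       /*
+        * Without rmaps there are no shadow-MMU SPTEs to write-protect;
+        * the TDP MMU handled its SPTEs above.
+        */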
+       if (!kvm_memslots_have_rmaps(kvm))
+               return;
+
        while (mask) {
                rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
                                          PG_LEVEL_4K, slot);
        if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
                                slot->base_gfn + gfn_offset, mask, false);
+
+       if (!kvm_memslots_have_rmaps(kvm))
+               return;
+
        while (mask) {
                rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
                                          PG_LEVEL_4K, slot);
        int i;
        bool write_protected = false;
 
-       for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
-               rmap_head = __gfn_to_rmap(gfn, i, slot);
-               write_protected |= __rmap_write_protect(kvm, rmap_head, true);
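+       /* A gfn can be mapped at several hugepage levels; check them all. */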
+       if (kvm_memslots_have_rmaps(kvm)) {
+               for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+                       rmap_head = __gfn_to_rmap(gfn, i, slot);
+                       write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+               }
        }
 
        if (is_tdp_mmu_enabled(kvm))
 
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       bool flush;
+       bool flush = false;
 
-       flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
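+       /* With no rmaps allocated, only the TDP MMU can have mappings to zap. */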
+       if (kvm_memslots_have_rmaps(kvm))
+               flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
                flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       bool flush;
+       bool flush = false;
 
-       flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
+       if (kvm_memslots_have_rmaps(kvm))
+               flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
                flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       bool young;
+       bool young = false;
 
-       young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
+       if (kvm_memslots_have_rmaps(kvm))
+               young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
                young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
 
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       bool young;
+       bool young = false;
 
-       young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
+       if (kvm_memslots_have_rmaps(kvm))
+               young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
                young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
        int i;
        bool flush = false;
 
-       write_lock(&kvm->mmu_lock);
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-               slots = __kvm_memslots(kvm, i);
-               kvm_for_each_memslot(memslot, slots) {
-                       gfn_t start, end;
-
-                       start = max(gfn_start, memslot->base_gfn);
-                       end = min(gfn_end, memslot->base_gfn + memslot->npages);
-                       if (start >= end)
-                               continue;
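+       /*
+        * The rmap walk and its remote TLB flush are needed only when the
+        * shadow MMU is in use; the TDP MMU zaps and flushes on its own below.
+        */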
+       if (kvm_memslots_have_rmaps(kvm)) {
+               write_lock(&kvm->mmu_lock);
+               for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+                       slots = __kvm_memslots(kvm, i);
+                       kvm_for_each_memslot(memslot, slots) {
+                               gfn_t start, end;
+
+                               start = max(gfn_start, memslot->base_gfn);
+                               end = min(gfn_end, memslot->base_gfn + memslot->npages);
+                               if (start >= end)
+                                       continue;
 
-                       flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-                                                       PG_LEVEL_4K,
-                                                       KVM_MAX_HUGEPAGE_LEVEL,
-                                                       start, end - 1, true, flush);
+                               flush = slot_handle_level_range(kvm, memslot,
+                                               kvm_zap_rmapp, PG_LEVEL_4K,
+                                               KVM_MAX_HUGEPAGE_LEVEL, start,
+                                               end - 1, true, flush);
+                       }
                }
+               if (flush)
+                       kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+               write_unlock(&kvm->mmu_lock);
        }
 
-       if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-
-       write_unlock(&kvm->mmu_lock);
-
        if (is_tdp_mmu_enabled(kvm)) {
                flush = false;
 
                                      struct kvm_memory_slot *memslot,
                                      int start_level)
 {
-       bool flush;
+       bool flush = false;
 
-       write_lock(&kvm->mmu_lock);
-       flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
-                               start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
-       write_unlock(&kvm->mmu_lock);
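+       /* Only the rmap walk still requires mmu_lock to be held for write. */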
+       if (kvm_memslots_have_rmaps(kvm)) {
+               write_lock(&kvm->mmu_lock);
+               flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+                                         start_level, KVM_MAX_HUGEPAGE_LEVEL,
+                                         false);
+               write_unlock(&kvm->mmu_lock);
+       }
 
        if (is_tdp_mmu_enabled(kvm)) {
                read_lock(&kvm->mmu_lock);
        struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
-       bool flush;
+       bool flush = false;
 
-       write_lock(&kvm->mmu_lock);
-       flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-
-       if (flush)
-               kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
-       write_unlock(&kvm->mmu_lock);
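+       /*
+        * flush starts false, so the TDP MMU pass below sees a valid value
+        * even when the rmap pass is skipped.
+        */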
+       if (kvm_memslots_have_rmaps(kvm)) {
+               write_lock(&kvm->mmu_lock);
+               flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+               if (flush)
+                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+               write_unlock(&kvm->mmu_lock);
+       }
 
        if (is_tdp_mmu_enabled(kvm)) {
-               flush = false;
-
                read_lock(&kvm->mmu_lock);
                flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
                if (flush)
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot)
 {
-       bool flush;
+       bool flush = false;
 
-       write_lock(&kvm->mmu_lock);
-       flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
-       write_unlock(&kvm->mmu_lock);
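+       /* Shadow-MMU SPTEs are reachable only through the rmaps. */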
+       if (kvm_memslots_have_rmaps(kvm)) {
+               write_lock(&kvm->mmu_lock);
+               flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
+                                        false);
+               write_unlock(&kvm->mmu_lock);
+       }
 
        if (is_tdp_mmu_enabled(kvm)) {
                read_lock(&kvm->mmu_lock);