*/
        spinlock_t tdp_mmu_pages_lock;
 #endif /* CONFIG_X86_64 */
+
+       /*
+        * If set, rmaps have been allocated for all memslots and should be
+        * allocated for any newly created or modified memslots.
+        */
+       bool memslots_have_rmaps;
 };
 
 struct kvm_vm_stat {
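
(For orientation: a minimal sketch of how the new flag would be read
elsewhere, assuming a trivial accessor; the helper name below is
illustrative and does not appear in this excerpt.)

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	/* Sketch only: trivial reader for the flag added above. */
	return kvm->arch.memslots_have_rmaps;
}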
 
 
        kvm_mmu_init_tdp_mmu(kvm);
 
+       kvm->arch.memslots_have_rmaps = true;
+
        node->track_write = kvm_mmu_pte_write;
        node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
        kvm_page_track_register_notifier(kvm, node);
 
        return 0;
 }
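
Because the flag is set unconditionally during MMU init, every VM still
allocates rmaps for all of its memslots, so this hunk on its own changes
no behavior; the check added to kvm_alloc_memslot_metadata() below only
becomes meaningful once a later change can leave the flag clear (e.g.
for VMs served entirely by the TDP MMU). A sketch of the guard that
rmap consumers would then need, reusing the illustrative accessor above
(the function here is hypothetical):

static void example_walk_rmaps(struct kvm *kvm,
			       struct kvm_memory_slot *slot)
{
	/* Hypothetical consumer: bail out if rmaps were never allocated. */
	if (!kvm_memslots_have_rmaps(kvm))
		return;

	/* ... only now is it safe to dereference slot->arch.rmap[] ... */
}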
 
-static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
+static int kvm_alloc_memslot_metadata(struct kvm *kvm,
+                                     struct kvm_memory_slot *slot,
                                      unsigned long npages)
 {
        int i, r;
         */
        memset(&slot->arch, 0, sizeof(slot->arch));
 
-       r = memslot_rmap_alloc(slot, npages);
-       if (r)
-               return r;
+       if (kvm->arch.memslots_have_rmaps) {
+               r = memslot_rmap_alloc(slot, npages);
+               if (r)
+                       return r;
+       }
 
        for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
                struct kvm_lpage_info *linfo;
                                enum kvm_mr_change change)
 {
        if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
-               return kvm_alloc_memslot_metadata(memslot,
+               return kvm_alloc_memslot_metadata(kvm, memslot,
                                                  mem->memory_size >> PAGE_SHIFT);
        return 0;
 }
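
memslot_rmap_alloc() itself is not shown in this excerpt. A plausible
sketch of such a helper, reconstructed from the rmap allocation that
kvm_alloc_memslot_metadata() traditionally performed inline (the body
below is an assumption, not quoted from the series):

static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
			      unsigned long npages)
{
	const int sz = sizeof(*slot->arch.rmap[0]);
	int i;

	/* One rmap array per supported page size, 4K upward. */
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		int level = i + PG_LEVEL_4K;
		int lpages = gfn_to_index(slot->base_gfn + npages - 1,
					  slot->base_gfn, level) + 1;

		slot->arch.rmap[i] = kvcalloc(lpages, sz,
					      GFP_KERNEL_ACCOUNT);
		if (!slot->arch.rmap[i]) {
			/* Unwind partial allocations on failure. */
			while (i--) {
				kvfree(slot->arch.rmap[i]);
				slot->arch.rmap[i] = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}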