return true;
 }
 
+static __ro_after_init HLIST_HEAD(empty_page_hash);
+
+static struct hlist_head *kvm_get_mmu_page_hash(struct kvm *kvm, gfn_t gfn)
+{
+       /*
+        * Ensure the load of the hash table pointer itself is ordered before
+        * loads to walk the table.  The pointer is set at runtime, outside of
+        * mmu_lock, when the TDP MMU is enabled, as the hash table of shadow
+        * pages is needed only if KVM must shadow L1's TDP for an L2 guest.
+        * Pairs with the smp_store_release() in kvm_mmu_alloc_page_hash().
+        */
+       struct hlist_head *page_hash = smp_load_acquire(&kvm->arch.mmu_page_hash);
+
+       lockdep_assert_held(&kvm->mmu_lock);
+
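+       /*
+        * Fall back to a dummy, always-empty list if the hash table hasn't
+        * been allocated, i.e. if KVM has yet to create any shadow pages.
+        */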
+       if (!page_hash)
+               return &empty_page_hash;
+
+       return &page_hash[kvm_page_table_hashfn(gfn)];
+}
+
 #define for_each_valid_sp(_kvm, _sp, _list)                            \
        hlist_for_each_entry(_sp, _list, hash_link)                     \
                if (is_obsolete_sp((_kvm), (_sp))) {                    \
                } else
 
 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)              \
-       for_each_valid_sp(_kvm, _sp,                                    \
-         &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
+       for_each_valid_sp(_kvm, _sp, kvm_get_mmu_page_hash(_kvm, _gfn)) \
                if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        struct kvm_mmu_page *sp;
        bool created = false;
 
+       /*
+        * No need for memory barriers, unlike in kvm_get_mmu_page_hash(), as
+        * mmu_page_hash must be set prior to creating the first shadow root,
+        * i.e. reaching this point is fully serialized by slots_arch_lock.
+        */
+       BUG_ON(!kvm->arch.mmu_page_hash);
        sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
 
        sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
 {
        struct hlist_head *h;
 
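+       /*
+        * Nothing to do if the hash table was already allocated, e.g. at VM
+        * creation when the TDP MMU is disabled.
+        */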
+       if (kvm->arch.mmu_page_hash)
+               return 0;
+
        h = kvcalloc(KVM_NUM_MMU_PAGES, sizeof(*h), GFP_KERNEL_ACCOUNT);
        if (!h)
                return -ENOMEM;
 
-       kvm->arch.mmu_page_hash = h;
+       /*
+        * Ensure the hash table pointer is set only after all stores to zero
+        * the memory are visible, i.e. readers that observe the pointer also
+        * observe an empty table.  Pairs with the smp_load_acquire() in
+        * kvm_get_mmu_page_hash().  Note, mmu_lock must be held for write to
+        * add (or remove) shadow pages, and so readers are guaranteed to see
+        * an empty list for their current mmu_lock critical section.
+        */
+       smp_store_release(&kvm->arch.mmu_page_hash, h);
        return 0;
 }
 
        if (kvm_shadow_root_allocated(kvm))
                goto out_unlock;
 
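+       /*
+        * Allocate the hashed list of shadow pages when allocating the first
+        * shadow root, as the list isn't allocated at VM creation when the
+        * TDP MMU is enabled (shadow pages may never be needed).
+        */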
+       r = kvm_mmu_alloc_page_hash(kvm);
+       if (r)
+               goto out_unlock;
+
        /*
-        * Check if anything actually needs to be allocated, e.g. all metadata
-        * will be allocated upfront if TDP is disabled.
+        * Check if memslot metadata actually needs to be allocated, e.g. all
+        * metadata will be allocated upfront if TDP is disabled.
         */
        if (kvm_memslots_have_rmaps(kvm) &&
            kvm_page_track_write_tracking_enabled(kvm))
        INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-       r = kvm_mmu_alloc_page_hash(kvm);
-       if (r)
-               return r;
-
-       if (tdp_mmu_enabled)
+       if (tdp_mmu_enabled) {
                kvm_mmu_init_tdp_mmu(kvm);
+       } else {
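+               /*
+                * The shadow MMU is used for all guest mappings when the TDP
+                * MMU is disabled, so allocate the hashed list of shadow
+                * pages at VM creation.
+                */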
+               r = kvm_mmu_alloc_page_hash(kvm);
+               if (r)
+                       return r;
+       }
 
        kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
        kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;