* If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);        \
-            ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;      \
-            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))       \
-               if (kvm_mmu_page_as_id(_root) != _as_id) {              \
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)   \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);                \
+            ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;              \
+            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))               \
+               if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {       \
                } else
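/*
 * Sketch (not part of this patch): a hypothetical walker that passes -1 as
 * the as_id so the "_as_id >= 0" check above skips the address-space filter
 * and every valid root is visited.  It runs with mmu_lock held for read,
 * which the yield-safe iterator supports; example_process_root() is a
 * made-up helper used purely for illustration.
 */
static void example_walk_all_valid_roots(struct kvm *kvm)
{
        struct kvm_mmu_page *root;

        read_lock(&kvm->mmu_lock);

        /* -1 == "all address spaces"; true == only valid roots. */
        __for_each_tdp_mmu_root_yield_safe(kvm, root, -1, true)
                example_process_root(kvm, root);

        read_unlock(&kvm->mmu_lock);
}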
 
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)    \
  * Holding mmu_lock for write obviates the need for RCU protection as the list
  * is guaranteed to be stable.
  */
-#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                     \
-       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)     \
-               if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&    \
-                   kvm_mmu_page_as_id(_root) != _as_id) {              \
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                             \
+       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)             \
+               if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&            \
+                   _as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {       \
                } else
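/*
 * Sketch (not part of this patch): with the change above, a caller holding
 * mmu_lock for write can either filter to one address space by passing its
 * as_id, or pass -1 to visit the roots of every address space.  The caller
 * and example_touch_root() are hypothetical names for illustration only.
 */
static void example_visit_roots(struct kvm *kvm, int as_id)
{
        struct kvm_mmu_page *root;

        lockdep_assert_held_write(&kvm->mmu_lock);

        /* as_id == -1 means "all address spaces"; >= 0 filters to that one. */
        for_each_tdp_mmu_root(kvm, root, as_id)
                example_touch_root(kvm, root);
}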
 
 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)