unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
+       unsigned int indirect_shadow_pages;
        atomic_t invlpg_counter;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
 
                linfo = lpage_info_slot(gfn, slot, i);
                linfo->write_count += 1;
        }
+       kvm->arch.indirect_shadow_pages++;
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
                linfo->write_count -= 1;
                WARN_ON(linfo->write_count < 0);
        }
+       kvm->arch.indirect_shadow_pages--;
 }
 
 static int has_wrprotected_page(struct kvm *kvm,
        int level, npte, invlpg_counter, r, flooded = 0;
        bool remote_flush, local_flush, zap_page;
 
+       /*
+        * If we don't have indirect shadow pages, no guest page is
+        * write-protected, so we can return right away.
+        */
+       if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+               return;
+
        zap_page = remote_flush = local_flush = false;
        offset = offset_in_page(gpa);
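
The check reads indirect_shadow_pages without taking mmu_lock, so ACCESS_ONCE
is what guarantees a single real load that the compiler can neither cache nor
refetch. A sketch of the mechanism, assuming the <linux/compiler.h> definition
of this kernel generation (sketch names are hypothetical):

	/* As defined in <linux/compiler.h> at the time: */
	#define ACCESS_ONCE_SKETCH(x) (*(volatile typeof(x) *)&(x))

	static unsigned int counter;	/* stands in for indirect_shadow_pages */

	static void fast_path_sketch(void)
	{
		/*
		 * The volatile cast forces one real load per evaluation;
		 * without it the compiler could legally reuse a stale
		 * cached value here.
		 */
		if (!ACCESS_ONCE_SKETCH(counter))
			return;	/* nothing write-protected, skip the slow path */
		/* ... fall through to the pte-write slow path ... */
	}

In the common case this optimizes (TDP/EPT with no nested shadow paging), the
counter typically stays zero, so every emulated write skips the walk entirely.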