return 0;
 }
 
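+/* Return the number of pre-allocated objects left in an MMU memory cache. */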
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+       return cache->nobjs;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                  struct kmem_cache *cache)
 {
        return &linfo->rmap_pde;
 }
 
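+/*
+ * True if the pte_list_desc cache still holds pre-allocated objects, i.e.
+ * a new rmap entry can be added without topping up the caches first.
+ */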
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_memory_cache *cache;
+
+       cache = &vcpu->arch.mmu_pte_list_desc_cache;
+       return mmu_memory_cache_free_objects(cache);
+}
+
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
        struct kvm_mmu_page *sp;
                break;
        }
 
+       /*
+        * There is no need to check whether the memory allocation
+        * succeeded: pte prefetch is skipped if the cache does not
+        * have enough objects.
+        */
+       mmu_topup_memory_caches(vcpu);
        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte);
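+                       /*
+                        * Only prefetch the new pte when the rmap cache has
+                        * free objects; see mmu_topup_memory_caches() above.
+                        */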
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-                             & mask.word))
+                             & mask.word) && rmap_can_add(vcpu))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
                goto out;
        }
 
-       r = mmu_topup_memory_caches(vcpu);
-       if (r)
-               goto out;
-
        er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
        switch (er) {