        int return_handled;
 };
 
-#define KVM_MMU_PAGE_CACHE_NR_OBJS     32
-
-struct kvm_mmu_page_cache {
-       int nobjs;
-       void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
-};
-
 struct kvm_cpu_trap {
        unsigned long sepc;
        unsigned long scause;
        struct kvm_sbi_context sbi_context;
 
        /* Cache pages needed to program page tables with spinlock held */
-       struct kvm_mmu_page_cache mmu_page_cache;
+       struct kvm_mmu_memory_cache mmu_page_cache;
 
        /* VCPU power-off state */
        bool power_off;
 int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
                         gpa_t gpa, unsigned long hva, bool is_write);
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
 
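The conversion in mmu.c below leans on the generic KVM MMU memory cache instead of the private page cache removed above. A minimal usage sketch of that pattern, built only from the helpers that actually appear in this patch (kvm_mmu_topup_memory_cache(), kvm_mmu_memory_cache_alloc(), kvm_mmu_free_memory_cache()); it is illustrative only and not part of the diff:

/* Sketch only, not part of this patch: generic cache lifecycle. */
struct kvm_mmu_memory_cache cache;

memset(&cache, 0, sizeof(cache));
cache.gfp_zero = __GFP_ZERO;	/* hand out zeroed page-table pages */

/* May sleep: pre-fill with at least stage2_pgd_levels objects. */
if (kvm_mmu_topup_memory_cache(&cache, stage2_pgd_levels))
	return -ENOMEM;

/* With the MMU spinlock held: pop a pre-allocated, zeroed page. */
void *next_ptep = kvm_mmu_memory_cache_alloc(&cache);

/* After the mapping is installed: release whatever was left unused. */
kvm_mmu_free_memory_cache(&cache);
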
        return 0;
 }
 
-static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache,
-                             int min, int max)
-{
-       void *page;
-
-       BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS);
-       if (pcache->nobjs >= min)
-               return 0;
-       while (pcache->nobjs < max) {
-               page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               if (!page)
-                       return -ENOMEM;
-               pcache->objects[pcache->nobjs++] = page;
-       }
-
-       return 0;
-}
-
-static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache)
-{
-       while (pcache && pcache->nobjs)
-               free_page((unsigned long)pcache->objects[--pcache->nobjs]);
-}
-
-static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache)
-{
-       void *p;
-
-       if (!pcache)
-               return NULL;
-
-       BUG_ON(!pcache->nobjs);
-       p = pcache->objects[--pcache->nobjs];
-
-       return p;
-}
-
 static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
                                  pte_t **ptepp, u32 *ptep_level)
 {
 }
 
 static int stage2_set_pte(struct kvm *kvm, u32 level,
-                          struct kvm_mmu_page_cache *pcache,
+                          struct kvm_mmu_memory_cache *pcache,
                           gpa_t addr, const pte_t *new_pte)
 {
        u32 current_level = stage2_pgd_levels - 1;
                        return -EEXIST;
 
                if (!pte_val(*ptep)) {
-                       next_ptep = stage2_cache_alloc(pcache);
+                       if (!pcache)
+                               return -ENOMEM;
+                       next_ptep = kvm_mmu_memory_cache_alloc(pcache);
                        if (!next_ptep)
                                return -ENOMEM;
                        *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
 }
 
 static int stage2_map_page(struct kvm *kvm,
-                          struct kvm_mmu_page_cache *pcache,
+                          struct kvm_mmu_memory_cache *pcache,
                           gpa_t gpa, phys_addr_t hpa,
                           unsigned long page_size,
                           bool page_rdonly, bool page_exec)
        int ret = 0;
        unsigned long pfn;
        phys_addr_t addr, end;
-       struct kvm_mmu_page_cache pcache = { 0, };
+       struct kvm_mmu_memory_cache pcache;
+
+       memset(&pcache, 0, sizeof(pcache));
+       pcache.gfp_zero = __GFP_ZERO;
 
        end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(hpa);
                if (!writable)
                        pte = pte_wrprotect(pte);
 
-               ret = stage2_cache_topup(&pcache,
-                                        stage2_pgd_levels,
-                                        KVM_MMU_PAGE_CACHE_NR_OBJS);
+               ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
                if (ret)
                        goto out;
 
        }
 
 out:
-       stage2_cache_flush(&pcache);
+       kvm_mmu_free_memory_cache(&pcache);
        return ret;
 }
 
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct vm_area_struct *vma;
        struct kvm *kvm = vcpu->kvm;
-       struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
+       struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
        bool logging = (memslot->dirty_bitmap &&
                        !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
        unsigned long vma_pagesize, mmu_seq;
        }
 
        /* We need minimum second+third level pages */
-       ret = stage2_cache_topup(pcache, stage2_pgd_levels,
-                                KVM_MMU_PAGE_CACHE_NR_OBJS);
+       ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
        if (ret) {
                kvm_err("Failed to topup stage2 cache\n");
                return ret;
        return ret;
 }
 
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
-{
-       stage2_cache_flush(&vcpu->arch.mmu_page_cache);
-}
-
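With kvm_riscv_stage2_flush_cache() removed, its former callers are expected to drain the per-VCPU cache through the generic helper directly. A hedged sketch of what the VCPU create/teardown pairing would look like; the actual call sites are outside this excerpt:

/* Sketch, assuming VCPU create/destroy call sites not shown in this excerpt. */

/* at VCPU creation: request zeroed page-table pages from the cache */
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

/* at VCPU teardown: free anything still cached for this VCPU */
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
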
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
 {
        struct page *pgd_page;