unsigned long tce_value, unsigned long npages);
 extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
-extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
-extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
+extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
+extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern void kvmppc_core_free_memslot(struct kvm *kvm,
 
        }
 
        kvm->arch.hpt_cma_alloc = 0;
-       page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
+       page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
        if (page) {
                hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
                memset((void *)hpt, 0, (1ul << order));
 
  out_freehpt:
        if (kvm->arch.hpt_cma_alloc)
-               kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
+               kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
        else
                free_pages(hpt, order - PAGE_SHIFT);
        return -ENOMEM;
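
For context: the allocation path around these hunks tries the CMA pool first and falls back to the buddy allocator only if that fails, recording which allocator succeeded in kvm->arch.hpt_cma_alloc. A minimal sketch of that alloc-side pattern, using a hypothetical wrapper name (alloc_hpt_pages) rather than the real kvmppc_alloc_hpt() logic:

/*
 * Hedged sketch, not the kernel's exact code: CMA first, buddy
 * allocator second, with the winning path recorded so the free side
 * can release through the matching allocator.
 */
static unsigned long alloc_hpt_pages(struct kvm *kvm, unsigned int order)
{
	struct page *page;
	unsigned long hpt = 0;

	kvm->arch.hpt_cma_alloc = 0;
	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, 1ul << order);
		kvm->arch.hpt_cma_alloc = 1;
	} else {
		/* Fall back to the normal page allocator. */
		hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN,
				       order - PAGE_SHIFT);
	}
	return hpt;
}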
 {
        vfree(kvm->arch.revmap);
        if (kvm->arch.hpt_cma_alloc)
-               kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
-                               1 << (kvm->arch.hpt_order - PAGE_SHIFT));
+               kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt_virt),
+                                1 << (kvm->arch.hpt_order - PAGE_SHIFT));
        else if (kvm->arch.hpt_virt)
                free_pages(kvm->arch.hpt_virt,
                           kvm->arch.hpt_order - PAGE_SHIFT);
 
 }
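
The free side must mirror that choice: pages obtained from cma_alloc() may only be returned via cma_release(), never free_pages(), which is what the hpt_cma_alloc test above enforces. A matching hypothetical counterpart to the sketch above:

/*
 * Hedged sketch: release through whichever allocator satisfied the
 * original request (see alloc_hpt_pages() above).
 */
static void free_hpt_pages(struct kvm *kvm, unsigned long hpt,
			   unsigned int order)
{
	if (kvm->arch.hpt_cma_alloc)
		kvm_free_hpt_cma(virt_to_page((void *)hpt),
				 1ul << (order - PAGE_SHIFT));
	else if (hpt)
		free_pages(hpt, order - PAGE_SHIFT);
}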
 early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
 
-struct page *kvm_alloc_hpt(unsigned long nr_pages)
+struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
 }
-EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
+EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
-void kvm_release_hpt(struct page *page, unsigned long nr_pages)
+void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
 {
        cma_release(kvm_cma, page, nr_pages);
 }
-EXPORT_SYMBOL_GPL(kvm_release_hpt);
+EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
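
Both renamed functions stay thin wrappers over CMA: cma_alloc() aligns the block to order_base_2(HPT_ALIGN_PAGES) pages so the returned HPT is naturally aligned, and the VM_BUG_ON() rejects requests smaller than one KVM_CMA_CHUNK. An illustrative caller (the function name and the order value are assumptions, not kernel code):

/*
 * Illustrative only: carve a 16 MiB HPT (byte order 24 with 4 KiB
 * pages) out of the KVM CMA region and release it again.  The page
 * count is the byte order converted to pages, as the callers above do.
 */
static int hpt_cma_example(void)
{
	unsigned int order = 24;	/* 16 MiB, illustrative */
	unsigned long nr_pages = 1ul << (order - PAGE_SHIFT);
	struct page *page = kvm_alloc_hpt_cma(nr_pages);
	unsigned long hpt;

	if (!page)
		return -ENOMEM;

	hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
	memset((void *)hpt, 0, 1ul << order);
	/* ... use the hash page table ... */
	kvm_free_hpt_cma(page, nr_pages);
	return 0;
}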
 
 /**
  * kvm_cma_reserve() - reserve area for kvm hash pagetable