percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
+static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 {
        MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
        hlist_del(&sp->hash_link);
        list_del(&sp->link);
        free_page((unsigned long)sp->spt);
        if (!sp->role.direct)
                free_page((unsigned long)sp->gfns);
        kmem_cache_free(mmu_page_header_cache, sp);
 }
 
-static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                            union kvm_mmu_page_role role)
+static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+                                                   gfn_t gfn,
+                                                   union kvm_mmu_page_role role)
 {
        struct hlist_head *sp_list;
        struct kvm_mmu_page *sp;

static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
                                                 u64 *sptep, gfn_t gfn,
                                                 bool direct, unsigned int access)
 {
        union kvm_mmu_page_role role;
 
        role = kvm_mmu_child_role(sptep, direct, access);
-       return kvm_mmu_get_page(vcpu, gfn, role);
+       return kvm_mmu_get_shadow_page(vcpu, gfn, role);
 }
 
 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
 
        list_for_each_entry_safe(sp, nsp, invalid_list, link) {
                WARN_ON(!sp->role.invalid || sp->root_count);
-               kvm_mmu_free_page(sp);
+               kvm_mmu_free_shadow_page(sp);
        }
 }
 
        WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
        WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
 
-       sp = kvm_mmu_get_page(vcpu, gfn, role);
+       sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
        ++sp->root_count;
 
        return __pa(sp->spt);