        return true;
 }
 
-static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
-                                          const struct kvm_memory_slot *slot)
+static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
+                                        const struct kvm_memory_slot *slot)
 {
        unsigned long idx;
 
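
For context, the helper's body is untouched by this rename; in mmu.c it
reads roughly as below (a sketch from memory of the contemporary tree,
relying on the gfn_to_index() helper and the per-level slot->arch.rmap
layout):

    static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
                                             const struct kvm_memory_slot *slot)
    {
            unsigned long idx;

            /* Index of this gfn's rmap head within the slot's per-level array. */
            idx = gfn_to_index(gfn, slot->base_gfn, level);
            return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
    }
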
        sp = sptep_to_sp(spte);
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
        return pte_list_add(vcpu, spte, rmap_head);
 }
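
Every call site in this patch funnels into the same index math. For
reference, gfn_to_index() lived in arch/x86/include/asm/kvm_host.h at the
time and looked approximately like this (quoted from memory, treat as a
sketch):

    static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
            /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
            return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                   (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
    }
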
 
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
 
        slot = __gfn_to_memslot(slots, gfn);
-       rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
        __pte_list_remove(spte, rmap_head);
 }
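
Note the asymmetry with rmap_add(): the removal path has no vcpu, so it
derives the memslot set from the shadow page's role instead. A sketch of
that helper, assuming the definition from the x86 KVM headers of that era:

    static inline struct kvm_memslots *
    kvm_memslots_for_spte_role(struct kvm *kvm, union kvm_mmu_page_role role)
    {
            /* SMM runs in its own address space with its own memslots. */
            return __kvm_memslots(kvm, role.smm);
    }
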
                return;
 
        while (mask) {
-               rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-                                         PG_LEVEL_4K, slot);
+               rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+                                       PG_LEVEL_4K, slot);
                __rmap_write_protect(kvm, rmap_head, false);
 
                /* clear the first set bit */
                return;
 
        while (mask) {
-               rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-                                         PG_LEVEL_4K, slot);
+               rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+                                       PG_LEVEL_4K, slot);
                __rmap_clear_dirty(kvm, rmap_head, slot);
 
                /* clear the first set bit */
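
Both masked loops above peel one set bit per iteration; the step that
clears the bit is cut off in this excerpt. Schematically (not kernel
code, just the idiom):

    while (mask) {
            /* gfn of the lowest set bit in the dirty/write-protect mask */
            gfn_t gfn = slot->base_gfn + gfn_offset + __ffs(mask);
            /* ... look up the rmap head for gfn and act on it ... */
            mask &= mask - 1;       /* clear the lowest set bit */
    }
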
 
        if (kvm_memslots_have_rmaps(kvm)) {
                for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
-                       rmap_head = __gfn_to_rmap(gfn, i, slot);
+                       rmap_head = gfn_to_rmap(gfn, i, slot);
                        write_protected |= __rmap_write_protect(kvm, rmap_head, true);
                }
        }
 {
        iterator->level = level;
        iterator->gfn = iterator->start_gfn;
-       iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
-       iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
-                                          iterator->slot);
+       iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
+       iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
 }
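
rmap_walk_init_level() seeds the iterator that the slot rmap walker macro
drives. From memory, the driving macro in mmu.c was shaped like this
(a sketch, not verbatim):

    #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,   \
                                     _start_gfn, _end_gfn, _iter_)         \
            for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,        \
                                     _end_level_, _start_gfn, _end_gfn);   \
                 slot_rmap_walk_okay(_iter_);                              \
                 slot_rmap_walk_next(_iter_))
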
 
 static void
 
        sp = sptep_to_sp(spte);
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
        kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
        kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
 
                return;
        }
 
-       rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, rev_sp->role.level, slot);
        if (!rmap_head->val) {
                if (!__ratelimit(&ratelimit_state))
                        return;
 
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, sp->gfn);
-       rmap_head = __gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot);
+       rmap_head = gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot);
 
        for_each_rmap_spte(rmap_head, &iter, sptep) {
                if (is_writable_pte(*sptep))
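
The audit hunk walks the sptes hanging off the rmap head via
for_each_rmap_spte(); its definition in mmu.c was approximately (from
memory, a sketch):

    #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                \
            for (_spte_ = rmap_get_first(_rmap_head_, _iter_);             \
                 _spte_; _spte_ = rmap_get_next(_iter_))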