        return (index << compound_order(page_head)) + compound_idx;
 }
 
-static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
+static struct page *alloc_buddy_huge_page(struct hstate *h,
                gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
        int order = huge_page_order(h);
        return page;
 }
 
+/*
+ * Common helper to allocate a fresh hugetlb page. All specific allocators
+ * should use this function to get new hugetlb pages.
+ */
+static struct page *alloc_fresh_huge_page(struct hstate *h,
+               gfp_t gfp_mask, int nid, nodemask_t *nmask)
+{
+       struct page *page;
+
+       if (hstate_is_gigantic(h))
+               page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
+       else
+               page = alloc_buddy_huge_page(h, gfp_mask,
+                               nid, nmask);
+       if (!page)
+               return NULL;
+
+       if (hstate_is_gigantic(h))
+               prep_compound_gigantic_page(page, huge_page_order(h));
+       prep_new_huge_page(h, page, page_to_nid(page));
+
+       return page;
+}
+
 /*
  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
  * manner.
  */
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
        struct page *page;
        int nr_nodes, node;
        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-               if (hstate_is_gigantic(h))
-                       page = alloc_gigantic_page(h, gfp_mask,
-                                       node, nodes_allowed);
-               else
-                       page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
-                                       node, nodes_allowed);
+               page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
                if (page)
                        break;
-
        }
 
        if (!page)
                return 0;
 
-       if (hstate_is_gigantic(h))
-               prep_compound_gigantic_page(page, huge_page_order(h));
-       prep_new_huge_page(h, page, page_to_nid(page));
        put_page(page); /* free it into the hugepage allocator */
 
        return 1;
 /*
  * Allocates a fresh surplus page from the page allocator.
  */
-static struct page *__alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
                int nid, nodemask_t *nmask)
 {
        struct page *page = NULL;
                goto out_unlock;
        spin_unlock(&hugetlb_lock);
 
-       page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
+       page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
        if (!page)
-               goto out_unlock;
+               return NULL;
 
        spin_lock(&hugetlb_lock);
        /*
                put_page(page);
                page = NULL;
        } else {
-               int r_nid;
-
                h->surplus_huge_pages++;
-               h->nr_huge_pages++;
-               INIT_LIST_HEAD(&page->lru);
-               r_nid = page_to_nid(page);
-               set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
-               set_hugetlb_cgroup(page, NULL);
-               h->nr_huge_pages_node[r_nid]++;
-               h->surplus_huge_pages_node[r_nid]++;
+               h->surplus_huge_pages_node[page_to_nid(page)]++;
        }
 
 out_unlock:
        return page;
 }
 
-static struct page *__alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
                int nid, nodemask_t *nmask)
 {
        struct page *page;
        if (hstate_is_gigantic(h))
                return NULL;
 
-       page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
+       page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
        if (!page)
                return NULL;
 
         * We do not account these pages as surplus because they are only
         * temporary and will be released properly on the last reference
         */
-       prep_new_huge_page(h, page, page_to_nid(page));
        SetPageHugeTemporary(page);
 
        return page;
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
 static
-struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
                struct vm_area_struct *vma, unsigned long addr)
 {
        struct page *page;
        nodemask_t *nodemask;
 
        nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
-       page = __alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
+       page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
        mpol_cond_put(mpol);
 
        return page;
        spin_unlock(&hugetlb_lock);
 
        if (!page)
-               page = __alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+               page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
 
        return page;
 }
        }
        spin_unlock(&hugetlb_lock);
 
-       return __alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
+       return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
 }
 
 /*
 retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
-               page = __alloc_surplus_huge_page(h, htlb_alloc_mask(h),
+               page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
                                NUMA_NO_NODE, NULL);
                if (!page) {
                        alloc_ok = false;
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
        if (!page) {
                spin_unlock(&hugetlb_lock);
-               page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
+               page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
                if (!page)
                        goto out_uncharge_cgroup;
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
                if (hstate_is_gigantic(h)) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
-               } else if (!alloc_fresh_huge_page(h,
+               } else if (!alloc_pool_huge_page(h,
                                         &node_states[N_MEMORY]))
                        break;
                cond_resched();
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
         *
-        * We might race with __alloc_surplus_huge_page() here and be unable
+        * We might race with alloc_surplus_huge_page() here and be unable
         * to convert a surplus huge page to a normal huge page. That is
         * not critical, though; it just means the overall size of the
         * pool might be one hugepage larger than it needs to be, but
                /* yield cpu to avoid soft lockup */
                cond_resched();
 
-               ret = alloc_fresh_huge_page(h, nodes_allowed);
+               ret = alloc_pool_huge_page(h, nodes_allowed);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;
         * By placing pages into the surplus state independent of the
         * overcommit value, we are allowing the surplus pool size to
         * exceed overcommit. There are few sane options here. Since
-        * __alloc_surplus_huge_page() is checking the global counter,
+        * alloc_surplus_huge_page() is checking the global counter,
         * though, we'll note that we're not allowed to exceed surplus
         * and won't grow the pool anywhere else. Not until one of the
         * sysctls is changed, or the surplus pages go out of use.