        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-       unsigned int nr_free_vmemmap_pages;
+       unsigned int optimize_vmemmap_pages;
 #endif
 #ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                return;
 
-       if (alloc_huge_page_vmemmap(h, page)) {
+       if (hugetlb_vmemmap_alloc(h, page)) {
                spin_lock_irq(&hugetlb_lock);
                /*
                 * If we cannot allocate vmemmap pages, just refuse to free the
 
 static inline void flush_free_hpage_work(struct hstate *h)
 {
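        /*
         * Freeing a vmemmap-optimized huge page may be deferred to a
         * workqueue because the discarded vmemmap pages must be re-allocated
         * first, which can sleep. Flush that work so any pending frees
         * complete before we continue.
         */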
-       if (free_vmemmap_pages_per_hpage(h))
+       if (hugetlb_optimize_vmemmap_pages(h))
                flush_work(&free_hpage_work);
 }
 
 
 static void __prep_new_huge_page(struct hstate *h, struct page *page)
 {
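        /*
         * Free the vmemmap pages that back most of this huge page's struct
         * pages; a no-op if vmemmap optimization is disabled for @h.
         */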
-       free_huge_page_vmemmap(h, page);
+       hugetlb_vmemmap_free(h, page);
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
        hugetlb_set_page_subpool(page, NULL);
                 * Attempt to allocate vmemmap here so that we can take
                 * appropriate action on failure.
                 */
-               rc = alloc_huge_page_vmemmap(h, head);
+               rc = hugetlb_vmemmap_alloc(h, head);
                if (!rc) {
                        /*
                         * Move PageHWPoison flag from head page to the raw
        remove_hugetlb_page_for_demote(h, page, false);
        spin_unlock_irq(&hugetlb_lock);
 
-       rc = alloc_huge_page_vmemmap(h, page);
+       rc = hugetlb_vmemmap_alloc(h, page);
        if (rc) {
                /* Allocation of vmemmap failed, we cannot demote the page */
                spin_lock_irq(&hugetlb_lock);
 
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Free some vmemmap pages of HugeTLB
+ * Optimize vmemmap pages associated with HugeTLB
  *
  * Copyright (c) 2020, Bytedance. All rights reserved.
  *
                        hugetlb_free_vmemmap_enabled_key);
 EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
 
-static int __init early_hugetlb_free_vmemmap_param(char *buf)
+static int __init hugetlb_vmemmap_early_param(char *buf)
 {
        /* We cannot optimize if a "struct page" crosses page boundaries. */
        if (!is_power_of_2(sizeof(struct page))) {
 
        return 0;
 }
-early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
-
-static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
-{
-       return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
-}
+early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
 
 /*
  * Previously discarded vmemmap pages will be allocated and remapped
  * after this function returns zero.
  */
-int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
 {
        int ret;
        unsigned long vmemmap_addr = (unsigned long)head;
-       unsigned long vmemmap_end, vmemmap_reuse;
+       unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
 
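        /* Nothing to do if the vmemmap of this page was never optimized. */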
        if (!HPageVmemmapOptimized(head))
                return 0;
 
-       vmemmap_addr += RESERVE_VMEMMAP_SIZE;
-       vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
-       vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+       vmemmap_addr    += RESERVE_VMEMMAP_SIZE;
+       vmemmap_pages   = hugetlb_optimize_vmemmap_pages(h);
+       vmemmap_end     = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
+       vmemmap_reuse   = vmemmap_addr - PAGE_SIZE;
+
        /*
         * The pages which the vmemmap virtual address range [@vmemmap_addr,
         * @vmemmap_end) are mapped to are freed to the buddy allocator, and
        return ret;
 }
 
-void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
 {
        unsigned long vmemmap_addr = (unsigned long)head;
-       unsigned long vmemmap_end, vmemmap_reuse;
+       unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
 
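        /* Nothing to do if there are no vmemmap pages to optimize for @h. */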
-       if (!free_vmemmap_pages_per_hpage(h))
+       vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
+       if (!vmemmap_pages)
                return;
 
-       vmemmap_addr += RESERVE_VMEMMAP_SIZE;
-       vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
-       vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+       vmemmap_addr    += RESERVE_VMEMMAP_SIZE;
+       vmemmap_end     = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
+       vmemmap_reuse   = vmemmap_addr - PAGE_SIZE;
 
        /*
         * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
         * hugetlbpage.rst for more details.
         */
        if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
-               h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
+               h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
 
-       pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
-               h->name);
+       pr_info("can optimize %d vmemmap pages for %s\n",
+               h->optimize_vmemmap_pages, h->name);
 }
 
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Free some vmemmap pages of HugeTLB
+ * Optimize vmemmap pages associated with HugeTLB
  *
  * Copyright (c) 2020, Bytedance. All rights reserved.
  *
 #include <linux/hugetlb.h>
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
-void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head);
+void hugetlb_vmemmap_free(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_init(struct hstate *h);
 
 /*
- * How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
+ * How many vmemmap pages associated with a HugeTLB page can be
+ * optimized and freed to the buddy allocator.
  */
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
 {
-       return h->nr_free_vmemmap_pages;
+       return h->optimize_vmemmap_pages;
 }
 #else
-static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+static inline int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
 {
        return 0;
 }
 
-static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+static inline void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
 {
 }
 
 {
 }
 
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
 {
        return 0;
 }