}
 
 found:
+
+       /*
+        * Only initialize the head struct page in memmap_init_reserved_pages();
+        * the rest of the struct pages will be initialized by the HugeTLB
+        * subsystem itself.
+        * The head struct page is used by the HugeTLB subsystem to get folio
+        * information such as zone id and node id.
+        */
+       memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
+               huge_page_size(h) - PAGE_SIZE);
        /* Put them into a private list first because mem_map is not up yet */
        INIT_LIST_HEAD(&m->list);
        list_add(&m->list, &huge_boot_pages);
        return 1;
 }
 
+/*
+ * Initialize the tail struct pages of a hugepage in the range
+ * [start_page_number, end_page_number).
+ */
+static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
+                                       unsigned long start_page_number,
+                                       unsigned long end_page_number)
+{
+       enum zone_type zone = zone_idx(folio_zone(folio));
+       int nid = folio_nid(folio);
+       unsigned long head_pfn = folio_pfn(folio);
+       unsigned long pfn, end_pfn = head_pfn + end_page_number;
+       int ret;
+
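+       /*
+        * These tail struct pages were skipped by memmap_init_reserved_pages()
+        * (see the memblock_reserved_mark_noinit() call in
+        * __alloc_bootmem_huge_page()), so initialize them here and freeze
+        * their refcounts to zero as is expected for compound tail pages.
+        */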
+       for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
+               struct page *page = pfn_to_page(pfn);
+
+               __init_single_page(page, pfn, zone, nid);
+               prep_compound_tail((struct page *)folio, pfn - head_pfn);
+               ret = page_ref_freeze(page, 1);
+               VM_BUG_ON(!ret);
+       }
+}
+
+static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
+                                             struct hstate *h,
+                                             unsigned long nr_pages)
+{
+       int ret;
+
+       /* Prepare folio head */
+       __folio_clear_reserved(folio);
+       __folio_set_head(folio);
+       ret = page_ref_freeze(&folio->page, 1);
+       VM_BUG_ON(!ret);
+       /* Initialize the necessary tail struct pages */
+       hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
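+       /*
+        * prep_compound_head() must run after the tail struct pages above are
+        * initialized, since it stores folio metadata (e.g. the order) in
+        * fields located in the first tail struct pages.
+        */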
+       prep_compound_head((struct page *)folio, huge_page_order(h));
+}
+
 /*
  * Put bootmem huge pages into the standard lists after mem_map is up.
  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
 
        list_for_each_entry(m, &huge_boot_pages, list) {
                struct page *page = virt_to_page(m);
-               struct folio *folio = page_folio(page);
+               struct folio *folio = (void *)page;
                struct hstate *h = m->hstate;
 
                VM_BUG_ON(!hstate_is_gigantic(h));
                WARN_ON(folio_ref_count(folio) != 1);
-               if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
-                       WARN_ON(folio_test_reserved(folio));
-                       prep_new_hugetlb_folio(h, folio, folio_nid(folio));
-                       free_huge_folio(folio); /* add to the hugepage allocator */
-               } else {
-                       /* VERY unlikely inflated ref count on a tail page */
-                       free_gigantic_folio(folio, huge_page_order(h));
-               }
+
+               hugetlb_folio_init_vmemmap(folio, h,
+                                          HUGETLB_VMEMMAP_RESERVE_PAGES);
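+               /*
+                * prep_new_hugetlb_folio() attempts HVO; on success the
+                * vmemmap backing the tail struct pages beyond
+                * HUGETLB_VMEMMAP_RESERVE_PAGES is freed, so those struct
+                * pages never need to be initialized.
+                */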
+               prep_new_hugetlb_folio(h, folio, folio_nid(folio));
+               /*
+                * If HVO failed (or is disabled), the vmemmap backing the
+                * remaining tail struct pages was not freed, so they must be
+                * initialized here.
+                */
+               if (!HPageVmemmapOptimized(&folio->page))
+                       hugetlb_folio_init_tail_vmemmap(folio,
+                                               HUGETLB_VMEMMAP_RESERVE_PAGES,
+                                               pages_per_huge_page(h));
+               free_huge_folio(folio); /* add to the hugepage allocator */
 
                /*
                 * We need to restore the 'stolen' pages to totalram_pages
                cond_resched();
        }
 }
+
 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 {
        unsigned long i;
 
 #define _LINUX_HUGETLB_VMEMMAP_H
 #include <linux/hugetlb.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
-void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
-
 /*
  * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
  * Documentation/vm/vmemmap_dedup.rst.
  */
 #define HUGETLB_VMEMMAP_RESERVE_SIZE   PAGE_SIZE
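+/* The number of struct pages that fit in HUGETLB_VMEMMAP_RESERVE_SIZE */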
+#define HUGETLB_VMEMMAP_RESERVE_PAGES  (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
+
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
+void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
 
 static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
 {