unsigned long flags;
        struct folio *folio, *tmp_f;
 
+       /* Send list for bulk vmemmap optimization processing */
+       hugetlb_vmemmap_optimize_folios(h, folio_list);
+
        /* Add all new pool pages to free lists in one lock cycle */
        spin_lock_irqsave(&hugetlb_lock, flags);
        list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
        prep_compound_head((struct page *)folio, huge_page_order(h));
 }
 
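+/*
+ * Add bootmem-allocated gigantic folios to the hugetlb pool.  Vmemmap
+ * optimization is attempted on the whole list first; any folio for
+ * which optimization failed still needs the remainder of its tail
+ * struct pages initialized before it is accounted and enqueued.
+ */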
+static void __init prep_and_add_bootmem_folios(struct hstate *h,
+                                       struct list_head *folio_list)
+{
+       unsigned long flags;
+       struct folio *folio, *tmp_f;
+
+       /* Send list for bulk vmemmap optimization processing */
+       hugetlb_vmemmap_optimize_folios(h, folio_list);
+
+       /* Add all new pool pages to free lists in one lock cycle */
+       spin_lock_irqsave(&hugetlb_lock, flags);
+       list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
+               if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
+                       /*
+                        * If HVO fails, initialize all tail struct pages.
+                        * We do not worry about potential long lock hold
+                        * time as this is early in boot and there should
+                        * be no contention.
+                        */
+                       hugetlb_folio_init_tail_vmemmap(folio,
+                                       HUGETLB_VMEMMAP_RESERVE_PAGES,
+                                       pages_per_huge_page(h));
+               }
+               __prep_account_new_huge_page(h, folio_nid(folio));
+               enqueue_hugetlb_folio(h, folio);
+       }
+       spin_unlock_irqrestore(&hugetlb_lock, flags);
+}
+
 /*
  * Put bootmem huge pages into the standard lists after mem_map is up.
  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
                 * in this list.  If so, process each size separately.
                 */
                if (h != prev_h && prev_h != NULL)
-                       prep_and_add_allocated_folios(prev_h, &folio_list);
+                       prep_and_add_bootmem_folios(prev_h, &folio_list);
                prev_h = h;
 
                VM_BUG_ON(!hstate_is_gigantic(h));
 
                hugetlb_folio_init_vmemmap(folio, h,
                                           HUGETLB_VMEMMAP_RESERVE_PAGES);
-               __prep_new_hugetlb_folio(h, folio);
-               /* If HVO fails, initialize all tail struct pages */
-               if (!HPageVmemmapOptimized(&folio->page))
-                       hugetlb_folio_init_tail_vmemmap(folio,
-                                               HUGETLB_VMEMMAP_RESERVE_PAGES,
-                                               pages_per_huge_page(h));
+               init_new_hugetlb_folio(h, folio);
                list_add(&folio->lru, &folio_list);
 
                /*
                cond_resched();
        }
 
-       prep_and_add_allocated_folios(h, &folio_list);
+       prep_and_add_bootmem_folios(h, &folio_list);
 }
 
 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 
 /* Return true iff the HugeTLB's vmemmap should and can be optimized. */
 static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 {
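+       /* Already optimized?  Then there is nothing more to do. */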
+       if (HPageVmemmapOptimized((struct page *)head))
+               return false;
+
        if (!READ_ONCE(vmemmap_optimize_enabled))
                return false;
 
                SetHPageVmemmapOptimized(head);
 }
 
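+/*
+ * hugetlb_vmemmap_optimize_folios - optimize the vmemmap of each folio
+ * on @folio_list.  Folios that cannot be optimized are left unchanged;
+ * callers can detect this with folio_test_hugetlb_vmemmap_optimized().
+ */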
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+       struct folio *folio;
+
+       list_for_each_entry(folio, folio_list, lru)
+               hugetlb_vmemmap_optimize(h, &folio->page);
+}
+
 static struct ctl_table hugetlb_vmemmap_sysctls[] = {
        {
                .procname       = "hugetlb_optimize_vmemmap",