* Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_split(unsigned long start, unsigned long end,
-                               unsigned long reuse)
+                              unsigned long reuse)
 {
        int ret;
        struct vmemmap_remap_walk walk = {
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
 
-static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags)
+static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
+                                          struct folio *folio, unsigned long flags)
 {
        int ret;
-       struct page *head = &folio->page;
-       unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+       unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
        unsigned long vmemmap_reuse;
 
-       VM_WARN_ON_ONCE(!PageHuge(head));
+       VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
        if (!folio_test_hugetlb_vmemmap_optimized(folio))
                return 0;
 
        list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
                if (folio_test_hugetlb_vmemmap_optimized(folio)) {
                        ret = __hugetlb_vmemmap_restore_folio(h, folio,
-                                               VMEMMAP_REMAP_NO_TLB_FLUSH);
+                                                             VMEMMAP_REMAP_NO_TLB_FLUSH);
                        if (ret)
                                break;
                        restored++;
 }
 
 /* Return true iff the vmemmap of a HugeTLB folio should and can be optimized. */
-static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
+static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
 {
-       if (HPageVmemmapOptimized((struct page *)head))
+       if (folio_test_hugetlb_vmemmap_optimized(folio))
                return false;
 
        if (!READ_ONCE(vmemmap_optimize_enabled))
 }
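
For orientation, the predicate above gates the optimization on two things: the folio must not already be flagged as vmemmap-optimized, and the runtime knob wired up through core_param() earlier in the file must still be enabled. A minimal userspace model of that gating shape, with entirely invented names (nothing below is kernel API), might look like:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: these names are hypothetical and only model the two checks. */
struct toy_folio {
	bool vmemmap_optimized;		/* models the vmemmap-optimized folio flag */
};

static _Atomic bool toy_optimize_enabled = true;	/* models vmemmap_optimize_enabled */

/*
 * Models the shape of vmemmap_should_optimize_folio(): never redo work that
 * is already done, and honour the runtime knob with a single relaxed load
 * (the kernel uses READ_ONCE() for that read).
 */
static bool toy_should_optimize(const struct toy_folio *folio)
{
	if (folio->vmemmap_optimized)
		return false;
	if (!atomic_load_explicit(&toy_optimize_enabled, memory_order_relaxed))
		return false;
	return true;
}

int main(void)
{
	struct toy_folio f = { .vmemmap_optimized = false };

	printf("fresh folio: %d\n", toy_should_optimize(&f));		/* 1 */
	f.vmemmap_optimized = true;
	printf("already optimized: %d\n", toy_should_optimize(&f));	/* 0 */
	return 0;
}
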
 
 static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
-                                       struct folio *folio,
-                                       struct list_head *vmemmap_pages,
-                                       unsigned long flags)
+                                           struct folio *folio,
+                                           struct list_head *vmemmap_pages,
+                                           unsigned long flags)
 {
        int ret = 0;
-       struct page *head = &folio->page;
-       unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+       unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
        unsigned long vmemmap_reuse;
 
-       VM_WARN_ON_ONCE(!PageHuge(head));
-       if (!vmemmap_should_optimize(h, head))
+       VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+       if (!vmemmap_should_optimize_folio(h, folio))
                return ret;
 
        static_branch_inc(&hugetlb_optimize_vmemmap_key);
         * the caller.
         */
        ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
-                                                       vmemmap_pages, flags);
+                                vmemmap_pages, flags);
        if (ret) {
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
                folio_clear_hugetlb_vmemmap_optimized(folio);
        free_vmemmap_page_list(&vmemmap_pages);
 }
 
-static int hugetlb_vmemmap_split(const struct hstate *h, struct page *head)
+static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
 {
-       unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+       unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
        unsigned long vmemmap_reuse;
 
-       if (!vmemmap_should_optimize(h, head))
+       if (!vmemmap_should_optimize_folio(h, folio))
                return 0;
 
        vmemmap_end     = vmemmap_start + hugetlb_vmemmap_size(h);
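
As a concrete sense of the range being marked off here: assuming 4 KiB base pages and a 64-byte struct page (typical x86_64 values; this hunk does not state them), a 2 MiB HugeTLB folio spans 512 base pages, so the vmemmap describing it works out to 512 * 64 = 32 KiB, i.e. eight vmemmap pages, most of which the optimization can remap onto a single reused page and free. A tiny sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Illustrative constants, not taken from this patch. */
	const unsigned long base_page   = 4096;		/* 4 KiB base pages */
	const unsigned long struct_page = 64;		/* sizeof(struct page) on x86_64 */
	const unsigned long hugepage    = 2UL << 20;	/* 2 MiB HugeTLB page */

	unsigned long pages_per_hugepage = hugepage / base_page;		/* 512 */
	unsigned long vmemmap_bytes      = pages_per_hugepage * struct_page;	/* 32768 */
	unsigned long vmemmap_pages      = vmemmap_bytes / base_page;		/* 8 */

	printf("vmemmap span: %lu bytes = %lu pages\n", vmemmap_bytes, vmemmap_pages);
	return 0;
}
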
        LIST_HEAD(vmemmap_pages);
 
        list_for_each_entry(folio, folio_list, lru) {
-               int ret = hugetlb_vmemmap_split(h, &folio->page);
+               int ret = hugetlb_vmemmap_split_folio(h, folio);
 
                /*
                 * Splitting the PMD requires allocating a page, thus let's fail
        flush_tlb_all();
 
        list_for_each_entry(folio, folio_list, lru) {
-               int ret = __hugetlb_vmemmap_optimize_folio(h, folio,
-                                               &vmemmap_pages,
-                                               VMEMMAP_REMAP_NO_TLB_FLUSH);
+               int ret;
+
+               ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+                                                      VMEMMAP_REMAP_NO_TLB_FLUSH);
 
                /*
                 * Pages to be freed may have been accumulated.  If we
                        flush_tlb_all();
                        free_vmemmap_page_list(&vmemmap_pages);
                        INIT_LIST_HEAD(&vmemmap_pages);
-                       __hugetlb_vmemmap_optimize_folio(h, folio,
-                                               &vmemmap_pages,
-                                               VMEMMAP_REMAP_NO_TLB_FLUSH);
+                       __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+                                                        VMEMMAP_REMAP_NO_TLB_FLUSH);
                }
        }
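
The loop above is the payoff of the batching: PMDs were split for every folio up front, one flush_tlb_all() covered all of those splits, and each folio is then remapped with VMEMMAP_REMAP_NO_TLB_FLUSH so no per-folio flush is issued; only when an allocation failure forces the accumulated vmemmap pages to be recycled does another global flush happen before the same folio is retried. A simplified userspace sketch of that "defer per-item flushes, flush once per batch, flush-and-retry on allocation failure" shape, with entirely hypothetical names, could read:

#include <errno.h>
#include <stdio.h>

static int flushes;	/* counts how many "TLB flushes" the whole batch cost */

static void toy_flush_all(void)		/* stands in for flush_tlb_all() */
{
	flushes++;
}

/*
 * Stands in for __hugetlb_vmemmap_optimize_folio(): succeeds while it can
 * keep accumulating freed pages, and reports -ENOMEM once the backlog gets
 * too big so the caller can drain it and retry, as the loop above does.
 */
static int toy_optimize_one(int *accumulated)
{
	if (*accumulated >= 4)
		return -ENOMEM;
	(*accumulated)++;	/* models pages collected on vmemmap_pages */
	return 0;
}

static void toy_optimize_batch(int nr_items)
{
	int accumulated = 0;

	/* Split every PMD up front (not modelled), then flush once for all of them. */
	toy_flush_all();

	/* Optimize each item with the per-item flush suppressed. */
	for (int i = 0; i < nr_items; i++) {
		if (toy_optimize_one(&accumulated)) {
			/* Drain the backlog, flush once, then retry this item. */
			toy_flush_all();
			accumulated = 0;
			toy_optimize_one(&accumulated);
		}
	}

	printf("%d items optimized with %d flushes\n", nr_items, flushes);
}

int main(void)
{
	toy_optimize_batch(10);	/* far fewer flushes than ten per-item ones */
	return 0;
}
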