struct hugetlb_cgroup **ptr);
 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                         struct hugetlb_cgroup *h_cg,
-                                        struct page *page);
+                                        struct folio *folio);
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                              struct hugetlb_cgroup *h_cg,
-                                             struct page *page);
+                                             struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
                                         struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
                                         struct folio *folio);
 
 static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                                struct hugetlb_cgroup *h_cg,
-                                               struct page *page)
+                                               struct folio *folio)
 {
 }
 
 static inline void
 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
-                                 struct page *page)
+                                 struct folio *folio)
 {
 }
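
A standalone sketch of the conversion these prototypes undergo, assuming only a toy page/folio layout; every *_model name is invented for illustration and none of this is kernel code. It shows why taking a struct folio * directly lets a caller that already holds a folio skip a page_folio()-style lookup, while the inline stubs simply accept the folio and do nothing.

#include <stdio.h>

struct page { unsigned long flags; };
struct folio_model { struct page page; };	/* head page embedded first */
struct hugetlb_cgroup_model { const char *name; };

/* Old shape: took a page and derived the folio internally. */
static void commit_charge_page_model(int idx, unsigned long nr_pages,
				     struct hugetlb_cgroup_model *h_cg,
				     struct page *page)
{
	/* page_folio() stand-in; assumes a head page in this toy model */
	struct folio_model *folio = (struct folio_model *)page;

	printf("charge %lu pages (hstate %d) to %s via folio %p\n",
	       nr_pages, idx, h_cg->name, (void *)folio);
}

/* New shape: the caller passes the folio it already has. */
static void commit_charge_folio_model(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup_model *h_cg,
				      struct folio_model *folio)
{
	printf("charge %lu pages (hstate %d) to %s via folio %p\n",
	       nr_pages, idx, h_cg->name, (void *)folio);
}

int main(void)
{
	struct folio_model folio = { { 0 } };
	struct hugetlb_cgroup_model cg = { "hugetlb" };

	commit_charge_page_model(0, 512, &cg, &folio.page);	/* old calling convention */
	commit_charge_folio_model(0, 512, &cg, &folio);		/* new calling convention */
	return 0;
}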
 
 
        return h->free_huge_pages - h->resv_huge_pages;
 }
 
-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
        }
 
        mpol_cond_put(mpol);
-       return &folio->page;
+       return folio;
 
 err:
        return NULL;
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
 static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
                struct vm_area_struct *vma, unsigned long addr)
 {
        struct folio *folio = NULL;
        if (!folio)
                folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
        mpol_cond_put(mpol);
-       return &folio->page;
+       return folio;
 }
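
The two hunks above also flip the return type: helpers that used to hand back &folio->page now return the struct folio * they found or allocated. A minimal standalone sketch of that shape, with invented *_model names and a toy folio layout (not the kernel sources):

#include <stdlib.h>

struct page { unsigned long flags; };
struct folio_model { struct page page; };	/* head page embedded first */

/* Old shape: peeled the head page out of the folio before returning. */
static struct page *dequeue_page_model(void)
{
	struct folio_model *folio = calloc(1, sizeof(*folio));

	return folio ? &folio->page : NULL;
}

/* New shape: returns the folio itself, so the caller keeps folio semantics. */
static struct folio_model *dequeue_folio_model(void)
{
	return calloc(1, sizeof(struct folio_model));
}

int main(void)
{
	struct folio_model *folio = dequeue_folio_model();
	struct page *page = dequeue_page_model();	/* legacy page view */

	if (!folio || !page)		/* mirrors the `if (!folio)` fallback above */
		return 1;

	free(folio);
	/* the page is the folio's first member, so converting back is fine here */
	free((struct folio_model *)page);
	return 0;
}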
 
 /* page migration callback function */
 {
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
-       struct page *page;
        struct folio *folio;
        long map_chg, map_commit;
        long gbl_chg;
         * from the global free pool (global change).  gbl_chg == 0 indicates
         * a reservation exists for the allocation.
         */
-       page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
-       if (!page) {
+       folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+       if (!folio) {
                spin_unlock_irq(&hugetlb_lock);
-               page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
-               if (!page)
+               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+               if (!folio)
                        goto out_uncharge_cgroup;
                spin_lock_irq(&hugetlb_lock);
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetHPageRestoreReserve(page);
+                       folio_set_hugetlb_restore_reserve(folio);
                        h->resv_huge_pages--;
                }
-               list_add(&page->lru, &h->hugepage_activelist);
-               set_page_refcounted(page);
+               list_add(&folio->lru, &h->hugepage_activelist);
+               folio_ref_unfreeze(folio, 1);
                /* Fall through */
        }
-       folio = page_folio(page);
-       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
        /* If allocation is not consuming a reservation, also store the
         * hugetlb_cgroup pointer on the page.
         */
        if (deferred_reserve) {
                hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
-                                                 h_cg, page);
+                                                 h_cg, folio);
        }
 
        spin_unlock_irq(&hugetlb_lock);
 
-       hugetlb_set_page_subpool(page, spool);
+       hugetlb_set_folio_subpool(folio, spool);
 
        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
                        hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
                                        pages_per_huge_page(h), folio);
        }
-       return page;
+       return &folio->page;
 
 out_uncharge_cgroup:
        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
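
Taken together, the allocation-path hunk above keeps every intermediate step on the folio (restore-reserve flag, active-list insertion, refcount unfreeze, cgroup commits) and only produces a struct page view at the final `return &folio->page;`. A standalone sketch of that flow, using invented *_model names and toy flag/list/refcount fields rather than the real folio_set_hugetlb_restore_reserve(), list_add() and folio_ref_unfreeze() machinery:

#include <stdbool.h>
#include <stdio.h>

struct page { unsigned long flags; };

struct list_node_model { struct list_node_model *next; };

struct folio_model {
	struct page page;		/* head page embedded first */
	struct list_node_model lru;
	int refcount;
	bool restore_reserve;
};

static struct list_node_model activelist_model;	/* stand-in for h->hugepage_activelist */

static struct page *alloc_huge_page_model(struct folio_model *folio, bool has_reserve)
{
	if (has_reserve)
		folio->restore_reserve = true;		/* folio_set_hugetlb_restore_reserve() analog */

	folio->lru.next = activelist_model.next;	/* list_add(&folio->lru, ...) analog */
	activelist_model.next = &folio->lru;

	folio->refcount = 1;				/* folio_ref_unfreeze(folio, 1) analog */

	return &folio->page;				/* only the boundary still speaks pages */
}

int main(void)
{
	struct folio_model folio = { { 0 }, { 0 }, 0, false };
	struct page *page = alloc_huge_page_model(&folio, true);

	printf("page %p, refcount %d, restore_reserve %d\n",
	       (void *)page, folio.refcount, (int)folio.restore_reserve);
	return 0;
}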
 
 
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
-                                 struct page *page)
+                                 struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }
 
 void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                       struct hugetlb_cgroup *h_cg,
-                                      struct page *page)
+                                      struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
 }