/* Should be called with hugetlb_lock held */
 static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                           struct hugetlb_cgroup *h_cg,
-                                          struct page *page, bool rsvd)
+                                          struct folio *folio, bool rsvd)
 {
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;
 
+       /* Tag the folio with its owning cgroup; rsvd selects the reservation variant. */
-       __set_hugetlb_cgroup(page_folio(page), h_cg, rsvd);
+       __set_hugetlb_cgroup(folio, h_cg, rsvd);
        if (!rsvd) {
+               /* Per-node usage is accounted for actual usage only, not reservations. */
                unsigned long usage =
-                       h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+                       h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
                /*
                 * This write is not atomic due to fetching usage and writing
                 * to it, but that's fine because we call this with
                 * hugetlb_lock held anyway.
                 */
-               WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+               WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
                           usage + nr_pages);
        }
 }
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
 {
-       __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
+       /* Caller interface still takes a page; convert once, then commit as usage. */
+       struct folio *folio = page_folio(page);
+
+       __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }
 
void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                      struct hugetlb_cgroup *h_cg,
                                      struct page *page)
{
-       __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
+       /* Caller interface still takes a page; convert once, then commit the reservation. */
+       struct folio *folio = page_folio(page);
+
+       __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}
 
 /*