gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void __mem_cgroup_uncharge(struct page *page);
-static inline void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
        if (mem_cgroup_disabled())
                return;
-       __mem_cgroup_uncharge(page);
+       __mem_cgroup_uncharge(folio);
 }
 
 void __mem_cgroup_uncharge_list(struct list_head *page_list);


 {
 }
 
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 }
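
All of the call-site hunks below follow the same mechanical shape:
callers that still hold a struct page wrap it in page_folio() at the
call boundary. A minimal before/after sketch (the caller context here
is illustrative, not taken from this patch):

	/* before: the callee had to resolve a possible tail page itself */
	mem_cgroup_uncharge(page);

	/* after: page_folio() maps a tail page to its containing folio,
	 * so the callee may assume it was handed a head page */
	mem_cgroup_uncharge(page_folio(page));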
 
 
        if (xas_error(&xas)) {
                error = xas_error(&xas);
                if (charged)
-                       mem_cgroup_uncharge(page);
+                       mem_cgroup_uncharge(page_folio(page));
                goto error;
        }
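
For context, the charged flag pairs this unwind with the charge taken
on entry to the same function, so a failed XArray insertion rolls back
exactly one charge. A minimal sketch of the charge side, assuming the
folio-taking mem_cgroup_charge() from earlier in this series (the huge
flag and error label are abridged from the surrounding function):

	bool charged = false;

	if (!huge) {	/* hugetlb pages are charged separately */
		error = mem_cgroup_charge(page_folio(page), NULL, gfp);
		if (error)
			goto error;
		charged = true;
	}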
 
 
        mmap_write_unlock(mm);
 out_nolock:
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        trace_mm_collapse_huge_page(mm, isolated, result);
        return;
 }
 out:
        VM_BUG_ON(!list_empty(&pagelist));
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        /* TODO: tracepoints */
 }
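
Both khugepaged exit labels mirror the charge taken just after the huge
page is allocated, so whichever label is reached, the charge is dropped
exactly once. A simplified sketch of the allocation-time pairing
(assuming the folio-taking mem_cgroup_charge() from earlier in this
series; result codes and error handling abridged):

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page)
		goto out_nolock;

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp)))
		goto out_nolock;
	/* ... any later failure unwinds through the labels above ... */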
 
 
        css_put(&memcg->css);
 }
 
-/**
- * __mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
- *
- * Uncharge a page previously charged with __mem_cgroup_charge().
- */
-void __mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio)
 {
        struct uncharge_gather ug;
 
-       /* Don't touch page->lru of any random page, pre-check: */
-       if (!page_memcg(page))
+       /* Don't touch folio->lru of any random folio, pre-check: */
+       if (!folio_memcg(folio))
                return;
 
        uncharge_gather_clear(&ug);
-       uncharge_folio(page_folio(page), &ug);
+       uncharge_folio(folio, &ug);
        uncharge_batch(&ug);
 }
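
The gather/batch split above exists so that batched callers can coalesce
uncharges against the same memcg. For reference, the list variant
follows the same pattern, converting each page as it walks; a sketch of
its shape after this series:

	void __mem_cgroup_uncharge_list(struct list_head *page_list)
	{
		struct uncharge_gather ug;
		struct page *page;

		uncharge_gather_clear(&ug);
		list_for_each_entry(page, page_list, lru)
			uncharge_folio(page_folio(page), &ug);
		if (ug.memcg)
			uncharge_batch(&ug);
	}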
 
 
                 * Poisoned page might never drop its ref count to 0 so we have
                 * to uncharge it manually from its memcg.
                 */
-               mem_cgroup_uncharge(p);
+               mem_cgroup_uncharge(page_folio(p));
 
                /*
                 * drop the page count elevated by isolate_lru_page()
 
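This hunk appears to be in delete_from_lru_cache() in mm/memory-failure.c:
a hwpoisoned page may stay pinned indefinitely, so waiting for the final
put_page() would leak the charge, and it is severed manually instead. A
condensed sketch of the surrounding flow (abridged):

	if (!isolate_lru_page(p)) {
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/* the poisoned page may never reach refcount 0, so sever
		 * the memcg charge now ... */
		mem_cgroup_uncharge(page_folio(p));
		/* ... and drop only the reference isolate_lru_page() took */
		put_page(p);
		return 0;
	}
	return -EIO;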
 
        __ClearPageWaiters(page);
 
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
 
        /*
         * When a device_private page is freed, the page->mapping field
 
 
 void free_compound_page(struct page *page)
 {
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_the_page(page, compound_order(page));
 }
 
 
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_unref_page(page, 0);
 }