--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
                          unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);
 
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
                                             struct bdi_writeback *wb);
 
 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
 {
+       struct folio *folio = page_folio(page);
+
        if (mem_cgroup_disabled())
                return;
 
-       if (unlikely(&page_memcg(page)->css != wb->memcg_css))
-               mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+       if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
+               mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
 }
 
 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
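This split keeps the hot path cheap: the inline helper above only compares the
folio's memcg against the memcg owning the writeback domain and drops into the
out-of-line slowpath on a mismatch.  As a rough sketch of the kind of call site
this is designed for (loosely modeled on the kernel's dirty accounting, not
copied from it; example_account_dirtied() is a hypothetical name, though
mapping_can_writeback() and inode_to_wb() are real helpers):

        /* Illustrative only: a dirty-accounting path invoking the tracker. */
        static void example_account_dirtied(struct page *page,
                                            struct address_space *mapping)
        {
                if (mapping_can_writeback(mapping)) {
                        /* The wb expected to write this page back. */
                        struct bdi_writeback *wb = inode_to_wb(mapping->host);

                        /* ... dirty counters are updated here ... */
                        mem_cgroup_track_foreign_dirty(page, wb);
                }
        }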
 
 
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
 
 TRACE_EVENT(track_foreign_dirty,
 
-       TP_PROTO(struct page *page, struct bdi_writeback *wb),
+       TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
 
-       TP_ARGS(page, wb),
+       TP_ARGS(folio, wb),
 
        TP_STRUCT__entry(
                __array(char,           name, 32)
                __field(u64,            bdi_id)
                __field(ino_t,          ino)
                __field(unsigned int,   memcg_id)
                __field(ino_t,          cgroup_ino)
                __field(ino_t,          page_cgroup_ino)
        ),
 
        TP_fast_assign(
-               struct address_space *mapping = page_mapping(page);
+               struct address_space *mapping = folio_mapping(folio);
                struct inode *inode = mapping ? mapping->host : NULL;
 
                strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
                __entry->bdi_id         = wb->bdi->id;
                __entry->ino            = inode ? inode->i_ino : 0;
                __entry->memcg_id       = wb->memcg_css->id;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
-               __entry->page_cgroup_ino = cgroup_ino(page_memcg(page)->css.cgroup);
+               __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
        ),
 
        TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
 
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
 
  * As being wrong occasionally doesn't matter, updates and accesses to the
  * records are lockless and racy.
  */
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
                                             struct bdi_writeback *wb)
 {
-       struct mem_cgroup *memcg = page_memcg(page);
+       struct mem_cgroup *memcg = folio_memcg(folio);
        struct memcg_cgwb_frn *frn;
        u64 now = get_jiffies_64();
        u64 oldest_at = now;
        int oldest = -1;
        int i;
 
-       trace_track_foreign_dirty(page, wb);
+       trace_track_foreign_dirty(folio, wb);
 
        /*
         * Pick the slot to use.  If there is already a slot for @wb, keep