 static int
 cifs_writepage_locked(struct page *page, struct writeback_control *wbc);
 
-static int cifs_write_one_page(struct page *page, struct writeback_control *wbc,
-               void *data)
+static int cifs_write_one_page(struct folio *folio,
+               struct writeback_control *wbc, void *data)
 {
        struct address_space *mapping = data;
        int ret;
 
-       ret = cifs_writepage_locked(page, wbc);
-       unlock_page(page);
+       ret = cifs_writepage_locked(&folio->page, wbc);
+       folio_unlock(folio);
        mapping_set_error(mapping, ret);
        return ret;
 }
 
        return err;
 }
 
-static int ext4_writepage_cb(struct page *page, struct writeback_control *wbc,
+static int ext4_writepage_cb(struct folio *folio, struct writeback_control *wbc,
                             void *data)
 {
-       return ext4_writepage(page, wbc);
+       return ext4_writepage(&folio->page, wbc);
 }
 
 static int ext4_do_writepages(struct mpage_da_data *mpd)
 
  *
  * However, we may have to redirty a page (see below.)
  */
-static int ext4_journalled_writepage_callback(struct page *page,
+static int ext4_journalled_writepage_callback(struct folio *folio,
                                              struct writeback_control *wbc,
                                              void *data)
 {
        struct buffer_head *bh, *head;
        struct journal_head *jh;
 
-       bh = head = page_buffers(page);
+       bh = head = folio_buffers(folio);
        do {
                /*
                 * We have to redirty a page in these cases:
                if (buffer_dirty(bh) ||
                    (jh && (jh->b_transaction != transaction ||
                            jh->b_next_transaction))) {
-                       redirty_page_for_writepage(wbc, page);
+                       folio_redirty_for_writepage(wbc, folio);
                        goto out;
                }
        } while ((bh = bh->b_this_page) != head);
 
        return false;
 }
 
-static int fuse_writepages_fill(struct page *page,
+static int fuse_writepages_fill(struct folio *folio,
                struct writeback_control *wbc, void *_data)
 {
        struct fuse_fill_wb_data *data = _data;
                        goto out_unlock;
        }
 
-       if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
+       if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
                fuse_writepages_send(data);
                data->wpa = NULL;
        }
                data->max_pages = 1;
 
                ap = &wpa->ia.ap;
-               fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
+               fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
                wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
                wpa->next = NULL;
                ap->args.in_pages = true;
                ap->num_pages = 0;
                wpa->inode = inode;
        }
-       set_page_writeback(page);
+       folio_start_writeback(folio);
 
-       copy_highpage(tmp_page, page);
+       copy_highpage(tmp_page, &folio->page);
        ap->pages[ap->num_pages] = tmp_page;
        ap->descs[ap->num_pages].offset = 0;
        ap->descs[ap->num_pages].length = PAGE_SIZE;
-       data->orig_pages[ap->num_pages] = page;
+       data->orig_pages[ap->num_pages] = &folio->page;
 
        inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
        inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
                spin_lock(&fi->lock);
                ap->num_pages++;
                spin_unlock(&fi->lock);
-       } else if (fuse_writepage_add(wpa, page)) {
+       } else if (fuse_writepage_add(wpa, &folio->page)) {
                data->wpa = wpa;
        } else {
-               end_page_writeback(page);
+               folio_end_writeback(folio);
        }
 out_unlock:
-       unlock_page(page);
+       folio_unlock(folio);
 
        return err;
 }
 
  * For unwritten space on the page, we need to start the conversion to
  * regular allocated space.
  */
-static int
-iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
+static int iomap_do_writepage(struct folio *folio,
+               struct writeback_control *wbc, void *data)
 {
-       struct folio *folio = page_folio(page);
        struct iomap_writepage_ctx *wpc = data;
        struct inode *inode = folio->mapping->host;
        u64 end_pos, isize;
 
        clean_buffers(page, ~0U);
 }
 
-static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
                      void *data)
 {
+       struct page *page = &folio->page;
        struct mpage_data *mpd = data;
        struct bio *bio = mpd->bio;
        struct address_space *mapping = page->mapping;
 
        return ret;
 }
 
-static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
+static int nfs_writepages_callback(struct folio *folio,
+               struct writeback_control *wbc, void *data)
 {
        int ret;
 
-       ret = nfs_do_writepage(page, wbc, data);
+       ret = nfs_do_writepage(&folio->page, wbc, data);
        if (ret != AOP_WRITEPAGE_ACTIVATE)
-               unlock_page(page);
+               folio_unlock(folio);
        return ret;
 }
 
 
        return err;
 }
 
-static int ntfs_resident_writepage(struct page *page,
+static int ntfs_resident_writepage(struct folio *folio,
                struct writeback_control *wbc, void *data)
 {
        struct address_space *mapping = data;
        int ret;
 
        ni_lock(ni);
-       ret = attr_data_write_resident(ni, page);
+       ret = attr_data_write_resident(ni, &folio->page);
        ni_unlock(ni);
 
        if (ret != E_NTFS_NONRESIDENT)
-               unlock_page(page);
+               folio_unlock(folio);
        mapping_set_error(mapping, ret);
        return ret;
 }
 
        return ret;
 }
 
-static int orangefs_writepages_callback(struct page *page,
-    struct writeback_control *wbc, void *data)
+static int orangefs_writepages_callback(struct folio *folio,
+               struct writeback_control *wbc, void *data)
 {
        struct orangefs_writepages *ow = data;
-       struct orangefs_write_range *wr;
+       struct orangefs_write_range *wr = folio->private;
        int ret;
 
-       if (!PagePrivate(page)) {
-               unlock_page(page);
+       if (!wr) {
+               folio_unlock(folio);
                /* It's not private so there's nothing to write, right? */
                printk("writepages_callback not private!\n");
                BUG();
                return 0;
        }
-       wr = (struct orangefs_write_range *)page_private(page);
 
        ret = -1;
        if (ow->npages == 0) {
                ow->len = wr->len;
                ow->uid = wr->uid;
                ow->gid = wr->gid;
-               ow->pages[ow->npages++] = page;
+               ow->pages[ow->npages++] = &folio->page;
                ret = 0;
                goto done;
        }
        }
        if (ow->off + ow->len == wr->pos) {
                ow->len += wr->len;
-               ow->pages[ow->npages++] = page;
+               ow->pages[ow->npages++] = &folio->page;
                ret = 0;
                goto done;
        }
                        orangefs_writepages_work(ow, wbc);
                        ow->npages = 0;
                }
-               ret = orangefs_writepage_locked(page, wbc);
-               mapping_set_error(page->mapping, ret);
-               unlock_page(page);
-               end_page_writeback(page);
+               ret = orangefs_writepage_locked(&folio->page, wbc);
+               mapping_set_error(folio->mapping, ret);
+               folio_unlock(folio);
+               folio_end_writeback(folio);
        } else {
                if (ow->npages == ow->maxpages) {
                        orangefs_writepages_work(ow, wbc);
 
 
 bool wb_over_bg_thresh(struct bdi_writeback *wb);
 
-typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
+typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
                                void *data);
 
 void tag_pages_for_writeback(struct address_space *mapping,
 
                                goto continue_unlock;
 
                        trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-                       error = writepage(&folio->page, wbc, data);
+                       error = writepage(folio, wbc, data);
                        if (unlikely(error)) {
                                /*
                                 * Handle errors according to the type of
 }
 EXPORT_SYMBOL(write_cache_pages);
 
-static int writepage_cb(struct page *page, struct writeback_control *wbc,
+static int writepage_cb(struct folio *folio, struct writeback_control *wbc,
                void *data)
 {
        struct address_space *mapping = data;
-       int ret = mapping->a_ops->writepage(page, wbc);
+       int ret = mapping->a_ops->writepage(&folio->page, wbc);
        mapping_set_error(mapping, ret);
        return ret;
 }
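
For reference, a minimal sketch of a callback written against the new folio-based
writepage_t, patterned on the cifs_write_one_page() and writepage_cb() conversions
above. The "myfs" names and the myfs_write_folio_locked() helper are hypothetical
placeholders, not a real in-tree API; only write_cache_pages(), folio_unlock() and
mapping_set_error() are the actual kernel interfaces.

#include <linux/pagemap.h>
#include <linux/writeback.h>

/*
 * Illustrative only: a per-folio callback for the new writepage_t signature.
 * myfs_write_folio_locked() stands in for whatever helper a filesystem uses
 * to write out one locked folio (hypothetical).
 */
static int myfs_writepages_cb(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	int ret;

	ret = myfs_write_folio_locked(folio, wbc);	/* hypothetical helper */
	folio_unlock(folio);
	mapping_set_error(mapping, ret);
	return ret;
}

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	/* write_cache_pages() now hands each locked folio to the callback */
	return write_cache_pages(mapping, wbc, myfs_writepages_cb, mapping);
}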