static void io_submit_add_bh(struct ext4_io_submit *io,
                             struct inode *inode,
-                            struct page *pagecache_page,
-                            struct page *bounce_page,
+                            struct folio *folio,
+                            struct folio *io_folio,
                             struct buffer_head *bh)
 {
-       int ret;
-
        if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
                           !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
 submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL)
                io_submit_init_bio(io, bh);
-       ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
-                          bh->b_size, bh_offset(bh));
-       if (ret != bh->b_size)
+       if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
                goto submit_and_retry;
-       wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
+       wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
        io->io_next_block++;
 }
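
A note on the API change folded into the hunk above: bio_add_page() returns the
number of bytes actually added (hence the old comparison against bh->b_size),
while bio_add_folio() returns a bool that is false when the bio has no room
left. A minimal sketch of the same submit-and-retry pattern in isolation, where
my_new_bio() and my_submit() are hypothetical stand-ins for
io_submit_init_bio() and ext4_io_submit():

        static void add_chunk(struct bio **biop, struct folio *folio,
                              size_t len, size_t off)
        {
        retry:
                if (*biop == NULL)
                        *biop = my_new_bio();   /* hypothetical helper */
                /* bio_add_folio() returns false once the bio is full */
                if (!bio_add_folio(*biop, folio, len, off)) {
                        my_submit(*biop);       /* flush the full bio... */
                        *biop = NULL;           /* ...and retry on a fresh one */
                        goto retry;
                }
        }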
 
 int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len)
 {
-       struct page *bounce_page = NULL;
-       struct inode *inode = page->mapping->host;
+       struct folio *folio = page_folio(page);
+       struct folio *io_folio = folio;
+       struct inode *inode = folio->mapping->host;
        unsigned block_start;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_to_submit = 0;
        struct writeback_control *wbc = io->io_wbc;
        bool keep_towrite = false;
 
-       BUG_ON(!PageLocked(page));
-       BUG_ON(PageWriteback(page));
+       BUG_ON(!folio_test_locked(folio));
+       BUG_ON(folio_test_writeback(folio));
 
-       ClearPageError(page);
+       folio_clear_error(folio);
 
        /*
         * Comments copied from block_write_full_page:
         *
-        * The page straddles i_size.  It must be zeroed out on each and every
+        * The folio straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       if (len < PAGE_SIZE)
-               zero_user_segment(page, len, PAGE_SIZE);
+       if (len < folio_size(folio))
+               folio_zero_segment(folio, len, folio_size(folio));
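
A worked example of the zeroing above: for len == 3000 in a 4096-byte folio,
folio_zero_segment(folio, 3000, 4096) clears bytes [3000, 4096), the tail that
mmap guarantees reads back as zeroes. Unlike the old PAGE_SIZE-bounded call,
the folio form also covers a multi-page folio (hypothetical for ext4 here):

        /*
         * len = 3000, folio_size() = 4096 (single page):
         *      folio_zero_segment(folio, 3000, 4096) zeroes [3000, 4096)
         * len = 5000, folio_size() = 16384 (order-2 folio, hypothetical):
         *      folio_zero_segment(folio, 5000, 16384) zeroes [5000, 16384),
         *      which a PAGE_SIZE-based bound could never reach
         */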
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
-        * mark all buffers in the page before submitting so that
-        * end_page_writeback() cannot be called from ext4_end_bio() when IO
+        * mark all buffers in the folio before submitting so that
+        * folio_end_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
-       bh = head = page_buffers(page);
+       bh = head = folio_buffers(folio);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        /*
                         * Keeping dirty some buffer we cannot write? Make sure
-                        * to redirty the page and keep TOWRITE tag so that
-                        * racing WB_SYNC_ALL writeback does not skip the page.
+                        * to redirty the folio and keep TOWRITE tag so that
+                        * racing WB_SYNC_ALL writeback does not skip the folio.
                         * This happens e.g. when doing writeout for
                         * transaction commit.
                         */
                        if (buffer_dirty(bh)) {
-                               if (!PageDirty(page))
-                                       redirty_page_for_writepage(wbc, page);
+                               if (!folio_test_dirty(folio))
+                                       folio_redirty_for_writepage(wbc, folio);
                                keep_towrite = true;
                        }
                        continue;
                }
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                set_buffer_async_write(bh);
                nr_to_submit++;
        } while ((bh = bh->b_this_page) != head);
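
Distilled, the invariant the comment before this loop is protecting: every
buffer that will be written must carry the async_write flag before any bio can
complete, because the completion path ends writeback on the folio once it
finds no async_write buffers still under I/O. A condensed sketch of the
two-pass shape, with should_write() a hypothetical stand-in for the
dirty/mapped/delay checks above:

        /* Pass 1: mark; take no action that could start or complete I/O */
        do {
                if (should_write(bh))           /* hypothetical predicate */
                        set_buffer_async_write(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Pass 2: only now build and submit bios */
        do {
                if (buffer_async_write(bh))
                        io_submit_add_bh(io, inode, folio, io_folio, bh);
        } while ((bh = bh->b_this_page) != head);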
 
-       /* Nothing to submit? Just unlock the page... */
+       /* Nothing to submit? Just unlock the folio... */
        if (!nr_to_submit)
                return 0;
 
-       bh = head = page_buffers(page);
+       bh = head = folio_buffers(folio);
 
        /*
         * If any blocks are being written to an encrypted file, encrypt them
         * into a bounce page.
         */
        if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
                gfp_t gfp_flags = GFP_NOFS;
                unsigned int enc_bytes = round_up(len, i_blocksize(inode));
+               struct page *bounce_page;
 
                /*
                 * Since bounce page allocation uses a mempool, we can only use
                 * a waiting mask (i.e. request guaranteed allocation) on the
                 * first page of the bio.  Otherwise it can deadlock.
                 */
                        }
 
                        printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
-                       redirty_page_for_writepage(wbc, page);
+                       folio_redirty_for_writepage(wbc, folio);
                        do {
                                if (buffer_async_write(bh)) {
                                        clear_buffer_async_write(bh);
                                        set_buffer_dirty(bh);
                                }
                                bh = bh->b_this_page;
                        } while (bh != head);
 
                        return ret;
                }
+               io_folio = page_folio(bounce_page);
        }
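
For context on io_folio: fscrypt_encrypt_pagecache_blocks() writes the
ciphertext of the first enc_bytes of the folio into a separate mempool-backed
bounce page and returns that page; the bio must then carry the ciphertext
while cgroup accounting still charges the plaintext folio. A minimal sketch
against the fscrypt API of this era, with the retry logic elided:

        struct page *bounce_page;

        bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
                                                       enc_bytes, 0, gfp_flags);
        if (IS_ERR(bounce_page))
                return PTR_ERR(bounce_page);    /* the real code retries -ENOMEM */
        io_folio = page_folio(bounce_page);     /* bio carries the ciphertext... */
        /* ...while wbc_account_cgroup_owner() still sees &folio->page */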
 
-       if (keep_towrite)
-               set_page_writeback_keepwrite(page);
-       else
-               set_page_writeback(page);
+       __folio_start_writeback(folio, keep_towrite);
 
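The keepwrite fold-in above leans on these equivalences (as of this
conversion):

        /*
         * set_page_writeback(page)           == __folio_start_writeback(folio, false)
         * set_page_writeback_keepwrite(page) == __folio_start_writeback(folio, true)
         *
         * keep_write == true leaves PAGECACHE_TAG_TOWRITE set on the mapping,
         * so a racing WB_SYNC_ALL writeback still revisits this folio for the
         * dirty buffers that could not be written (see keep_towrite above).
         */
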
        /* Now submit buffers to write */
        do {
                if (!buffer_async_write(bh))
                        continue;
-               io_submit_add_bh(io, inode, page, bounce_page, bh);
+               io_submit_add_bh(io, inode, folio, io_folio, bh);
        } while ((bh = bh->b_this_page) != head);
 
        return 0;
 }