* start/recovery path as __block_write_full_folio, along with special
  * code to handle reiserfs tails.
  */
-static int reiserfs_write_full_page(struct page *page,
+static int reiserfs_write_full_folio(struct folio *folio,
                                    struct writeback_control *wbc)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = folio->mapping->host;
        unsigned long end_index = inode->i_size >> PAGE_SHIFT;
        int error = 0;
        unsigned long block;
        struct buffer_head *head, *bh;
        int partial = 0;
        int nr = 0;
-       int checked = PageChecked(page);
+       int checked = folio_test_checked(folio);
        struct reiserfs_transaction_handle th;
        struct super_block *s = inode->i_sb;
        int bh_per_page = PAGE_SIZE / s->s_blocksize;
 
        /* no logging allowed when nonblocking or from PF_MEMALLOC */
        if (checked && (current->flags & PF_MEMALLOC)) {
-               redirty_page_for_writepage(wbc, page);
-               unlock_page(page);
+               folio_redirty_for_writepage(wbc, folio);
+               folio_unlock(folio);
                return 0;
        }
 
        /*
-        * The page dirty bit is cleared before writepage is called, which
+        * The folio dirty bit is cleared before writepage is called, which
         * means we have to tell create_empty_buffers to make dirty buffers
-        * The page really should be up to date at this point, so tossing
+        * The folio really should be up to date at this point, so tossing
         * in the BH_Uptodate is just a sanity check.
         */
-       if (!page_has_buffers(page)) {
-               create_empty_buffers(page, s->s_blocksize,
+       head = folio_buffers(folio);
+       if (!head)
+               head = folio_create_empty_buffers(folio, s->s_blocksize,
                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
-       }
-       head = page_buffers(page);
 
        /*
-        * last page in the file, zero out any contents past the
+        * last folio in the file, zero out any contents past the
         * last byte in the file
         */
-       if (page->index >= end_index) {
+       if (folio->index >= end_index) {
                unsigned last_offset;
 
                last_offset = inode->i_size & (PAGE_SIZE - 1);
-               /* no file contents in this page */
-               if (page->index >= end_index + 1 || !last_offset) {
-                       unlock_page(page);
+               /* no file contents in this folio */
+               if (folio->index >= end_index + 1 || !last_offset) {
+                       folio_unlock(folio);
                        return 0;
                }
-               zero_user_segment(page, last_offset, PAGE_SIZE);
+               folio_zero_segment(folio, last_offset, folio_size(folio));
        }
        bh = head;
-       block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
+       block = folio->index << (PAGE_SHIFT - s->s_blocksize_bits);
        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
        /* first map all the buffers, logging any direct items we find */
        do {
                if (block > last_block) {
                        /*
                         * This can happen when the block size is less than
-                        * the page size.  The corresponding bytes in the page
+                        * the folio size.  The corresponding bytes in the folio
                         * were zero filled above
                         */
                        clear_buffer_dirty(bh);
         * blocks we're going to log
         */
        if (checked) {
-               ClearPageChecked(page);
+               folio_clear_checked(folio);
                reiserfs_write_lock(s);
                error = journal_begin(&th, s, bh_per_page + 1);
                if (error) {
                }
                reiserfs_update_inode_transaction(inode);
        }
-       /* now go through and lock any dirty buffers on the page */
+       /* now go through and lock any dirty buffers on the folio */
        do {
                get_bh(bh);
                if (!buffer_mapped(bh))
                        lock_buffer(bh);
                } else {
                        if (!trylock_buffer(bh)) {
-                               redirty_page_for_writepage(wbc, page);
+                               folio_redirty_for_writepage(wbc, folio);
                                continue;
                        }
                }
                if (error)
                        goto fail;
        }
-       BUG_ON(PageWriteback(page));
-       set_page_writeback(page);
-       unlock_page(page);
+       BUG_ON(folio_test_writeback(folio));
+       folio_start_writeback(folio);
+       folio_unlock(folio);
 
        /*
-        * since any buffer might be the only dirty buffer on the page,
-        * the first submit_bh can bring the page out of writeback.
+        * since any buffer might be the only dirty buffer on the folio,
+        * the first submit_bh can bring the folio out of writeback.
         * be careful with the buffers.
         */
        do {
 done:
        if (nr == 0) {
                /*
-                * if this page only had a direct item, it is very possible for
+                * if this folio only had a direct item, it is very possible for
                 * no io to be required without there being an error.  Or,
                 * someone else could have locked them and sent them down the
-                * pipe without locking the page
+                * pipe without locking the folio
                 */
                bh = head;
                do {
                        bh = bh->b_this_page;
                } while (bh != head);
                if (!partial)
-                       SetPageUptodate(page);
-               end_page_writeback(page);
+                       folio_mark_uptodate(folio);
+               folio_end_writeback(folio);
        }
        return error;
 
 fail:
        /*
         * catches various errors, we need to make sure any valid dirty blocks
-        * get to the media.  The page is currently locked and not marked for
+        * get to the media.  The folio is currently locked and not marked for
         * writeback
         */
-       ClearPageUptodate(page);
+       folio_clear_uptodate(folio);
        bh = head;
        do {
                get_bh(bh);
                } else {
                        /*
                         * clear any dirty bits that might have come from
-                        * getting attached to a dirty page
+                        * getting attached to a dirty folio
                         */
                        clear_buffer_dirty(bh);
                }
                bh = bh->b_this_page;
        } while (bh != head);
-       SetPageError(page);
-       BUG_ON(PageWriteback(page));
-       set_page_writeback(page);
-       unlock_page(page);
+       folio_set_error(folio);
+       BUG_ON(folio_test_writeback(folio));
+       folio_start_writeback(folio);
+       folio_unlock(folio);
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
 
+/*
+ * reiserfs ->writepage entry point: look up the folio containing @page and
+ * hand off to the folio-based writer.  reiserfs_wait_on_write_block()
+ * presumably blocks while the journal forbids writes -- confirm against
+ * its definition before relying on that.
+ */
 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
 {
-       struct inode *inode = page->mapping->host;
+       struct folio *folio = page_folio(page);
+       struct inode *inode = folio->mapping->host;
        reiserfs_wait_on_write_block(inode->i_sb);
-       return reiserfs_write_full_page(page, wbc);
+       return reiserfs_write_full_folio(folio, wbc);
 }
 
 static void reiserfs_truncate_failed_write(struct inode *inode)