{
        /* Decrypt if needed */
        if (uptodate &&
-           fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
+           fscrypt_inode_uses_fs_layer_crypto(bh->b_folio->mapping->host)) {
                struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
 
                if (ctx) {
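
The hunk above sits in a read-completion path: the buffer came back uptodate and
the inode uses filesystem-layer encryption, and the GFP_ATOMIC allocation suggests
this runs in a context that cannot sleep, so the decryption itself is presumably
handed off to process context (the hunk ends before that code). A minimal,
hypothetical sketch of that defer-to-workqueue pattern, not taken from this patch;
demo_ctx, demo_endio() and demo_work_fn() are illustrative names only:

	#include <linux/buffer_head.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct demo_ctx {
		struct work_struct work;
		struct buffer_head *bh;
	};

	static void demo_work_fn(struct work_struct *work)
	{
		struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

		/* ... sleeping work (e.g. decryption) on ctx->bh goes here ... */
		kfree(ctx);
	}

	static void demo_endio(struct buffer_head *bh)
	{
		/* Completion context: no sleeping allocations or crypto here. */
		struct demo_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			INIT_WORK(&ctx->work, demo_work_fn);
			ctx->bh = bh;
			schedule_work(&ctx->work);	/* finish in process context */
		}
	}
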
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 {
        struct address_space *mapping = inode->i_mapping;
-       struct address_space *buffer_mapping = bh->b_page->mapping;
+       struct address_space *buffer_mapping = bh->b_folio->mapping;
 
        mark_buffer_dirty(bh);
        if (!mapping->private_data) {
  * and then attach the address_space's inode to its superblock's dirty
  * inode list.
  *
- * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
+ * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
  * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
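
The comment hunk above only renames the lock in the text: mark_buffer_dirty()
still handles its own locking, taking the buffer mapping's private_lock, the
i_pages lock and mapping->host->i_lock internally, so callers do not need to hold
them. A hypothetical caller sketch (not from this patch) showing the usual
modify-then-dirty sequence, relying on the buffer lock for I/O serialization and
on mark_buffer_dirty() for the mapping side; demo_update_block() is an
illustrative name, not a kernel API:

	#include <linux/buffer_head.h>
	#include <linux/string.h>

	static void demo_update_block(struct buffer_head *bh,
				      const void *src, size_t len)
	{
		lock_buffer(bh);		/* serialize against in-flight I/O */
		memcpy(bh->b_data, src, len);	/* modify the buffer contents */
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty(bh);		/* takes the mapping locks itself */
	}
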
 
        set_buffer_write_io_error(bh);
        /* FIXME: do we need to set this in both places? */
-       if (bh->b_page && bh->b_page->mapping)
-               mapping_set_error(bh->b_page->mapping, -EIO);
+       if (bh->b_folio && bh->b_folio->mapping)
+               mapping_set_error(bh->b_folio->mapping, -EIO);
        if (bh->b_assoc_map)
                mapping_set_error(bh->b_assoc_map, -EIO);
        rcu_read_lock();
 {
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
-               struct address_space *buffer_mapping = bh->b_page->mapping;
+               struct address_space *buffer_mapping = bh->b_folio->mapping;
 
                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
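
Every hunk here is the same mechanical substitution, and it is only safe because
the series makes b_folio an alias of b_page in struct buffer_head: as far as I can
tell from the companion change, the two pointers share storage in a union, and a
folio and its first struct page occupy the same memory, so bh->b_folio->mapping
resolves to the same address_space as bh->b_page->mapping. A trimmed sketch of the
idea (member layout simplified, not the verbatim definition from
include/linux/buffer_head.h):

	struct buffer_head {
		unsigned long b_state;			/* buffer state bitmap */
		struct buffer_head *b_this_page;	/* circular list of the page's buffers */
		union {
			struct page *b_page;		/* the page this bh is mapped to */
			struct folio *b_folio;		/* the folio this bh is mapped to */
		};
		/* ... remaining members unchanged ... */
	};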