 }
 EXPORT_SYMBOL(__block_write_begin);
 
-static void __block_commit_write(struct folio *folio, size_t from, size_t to)
+void block_commit_write(struct folio *folio, size_t from, size_t to)
 {
        size_t block_start, block_end;
        bool partial = false;
        if (!partial)
                folio_mark_uptodate(folio);
 }
+EXPORT_SYMBOL(block_commit_write);
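Not part of the patch, purely illustrative: with the folio-based block_commit_write() exported above, a caller now passes the folio and byte offsets within it directly. The helper name below is made up for the example.

	/* Hypothetical caller; assumes the folio is locked, as at the existing call sites. */
	static void myfs_commit_range(struct folio *folio, size_t from, size_t to)
	{
		/* Marks the buffers covering [from, to) uptodate and dirty;
		 * a zero-length commit is valid. */
		block_commit_write(folio, from, to);
	}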
 
 /*
  * block_write_begin takes care of the basic task of block allocation and
        flush_dcache_folio(folio);
 
        /* This could be a short (even 0-length) commit */
-       __block_commit_write(folio, start, start + copied);
+       block_commit_write(folio, start, start + copied);
 
        return copied;
 }
 }
 EXPORT_SYMBOL(cont_write_begin);
 
-void block_commit_write(struct page *page, unsigned from, unsigned to)
-{
-       struct folio *folio = page_folio(page);
-       __block_commit_write(folio, from, to);
-}
-EXPORT_SYMBOL(block_commit_write);
-
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
  * called from a page fault handler when a page is first dirtied. Hence we must
        if (unlikely(ret))
                goto out_unlock;
 
-       __block_commit_write(folio, 0, end);
+       block_commit_write(folio, 0, end);
 
        folio_mark_dirty(folio);
        folio_wait_stable(folio);
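For reference only (not in the patch): a filesystem built on this helper would typically route its ->page_mkwrite through block_page_mkwrite(); the get_block callback below is an assumed name.

	/* Sketch of a ->page_mkwrite handler; myfs_get_block is hypothetical. */
	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);

		return block_page_mkwrite_return(err);
	}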
 
                goto retry;
 
        if (folio)
-               block_commit_write(&folio->page, from, to);
+               block_commit_write(folio, from, to);
 out:
        if (folio) {
                folio_unlock(folio);
 
                bh = bh->b_this_page;
        }
 
-       block_commit_write(&folio[0]->page, from, from + replaced_size);
+       block_commit_write(folio[0], from, from + replaced_size);
 
        /* Even in case of data=writeback it is reasonable to pin
         * inode to transaction, to prevent unexpected data loss */
 
                                              &iter->iomap);
                if (ret)
                        return ret;
-               block_commit_write(&folio->page, 0, length);
+               block_commit_write(folio, 0, length);
        } else {
                WARN_ON_ONCE(!folio_test_uptodate(folio));
                folio_mark_dirty(folio);
 
                                ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
                                                           user_pos, user_len);
 
-                       block_commit_write(&folio->page, from, to);
+                       block_commit_write(folio, from, to);
                }
        }
 }
                                ocfs2_jbd2_inode_add_write(handle, inode,
                                                           start_byte, length);
                        }
-                       block_commit_write(&folio->page, from, to);
+                       block_commit_write(folio, from, to);
                }
        }
 
 
 
 
                /* must not update i_size! */
-               block_commit_write(&folio->page, block_start + 1, block_start + 1);
+               block_commit_write(folio, block_start + 1, block_start + 1);
        }
 
        /*
 
                goto out_unlock;
        }
 
-       block_commit_write(&folio->page, 0, end);
+       block_commit_write(folio, 0, end);
 out_dirty:
        folio_mark_dirty(folio);
        folio_wait_stable(folio);
 
                        unsigned, struct folio **, void **,
                        get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
-void block_commit_write(struct page *page, unsigned int from, unsigned int to);
+void block_commit_write(struct folio *folio, size_t from, size_t to);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
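Outside the patch: a call site that still holds a struct page can adapt to the new folio-based prototype by converting with page_folio() first, e.g.

	/* Hypothetical adapter for a page-based call site. */
	static void legacy_commit(struct page *page, size_t from, size_t to)
	{
		block_commit_write(page_folio(page), from, to);
	}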