iomap: Convert iomap_page_mkwrite to use a folio
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 29 Apr 2021 02:32:02 +0000 (22:32 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 16 Dec 2021 20:49:52 +0000 (15:49 -0500)
If we write to any page in a folio, we have to mark the entire
folio as dirty, and potentially COW the entire folio, because it'll
all get written back as one unit.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
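
For reference, a minimal sketch (not part of the commit) of the page-to-folio API correspondence this conversion relies on. A folio may span several pages, so each folio call now covers the whole writeback unit rather than a single page:

	struct folio *folio = page_folio(page);		/* page -> containing folio */

	folio_lock(folio);				/* was lock_page(page) */
	WARN_ON_ONCE(!folio_test_uptodate(folio));	/* was WARN_ON_ONCE(!PageUptodate(page)) */
	folio_mark_dirty(folio);			/* was set_page_dirty(page) */
	folio_wait_stable(folio);			/* was wait_for_stable_page(page) */
	folio_unlock(folio);				/* was unlock_page(page) */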
fs/iomap/buffered-io.c

index ad89c20cb741bcde4424a9278a3713cc2e9d479e..8d7a67655b60957adc5d9db991f0eec1bbd4f296 100644
@@ -967,10 +967,9 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
 }
 EXPORT_SYMBOL_GPL(iomap_truncate_page);
 
-static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
-               struct page *page)
+static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+               struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        loff_t length = iomap_length(iter);
        int ret;
 
@@ -979,10 +978,10 @@ static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
                                              &iter->iomap);
                if (ret)
                        return ret;
-               block_commit_write(page, 0, length);
+               block_commit_write(&folio->page, 0, length);
        } else {
-               WARN_ON_ONCE(!PageUptodate(page));
-               set_page_dirty(page);
+               WARN_ON_ONCE(!folio_test_uptodate(folio));
+               folio_mark_dirty(folio);
        }
 
        return length;
@@ -994,24 +993,24 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
                .inode          = file_inode(vmf->vma->vm_file),
                .flags          = IOMAP_WRITE | IOMAP_FAULT,
        };
-       struct page *page = vmf->page;
+       struct folio *folio = page_folio(vmf->page);
        ssize_t ret;
 
-       lock_page(page);
-       ret = page_mkwrite_check_truncate(page, iter.inode);
+       folio_lock(folio);
+       ret = folio_mkwrite_check_truncate(folio, iter.inode);
        if (ret < 0)
                goto out_unlock;
-       iter.pos = page_offset(page);
+       iter.pos = folio_pos(folio);
        iter.len = ret;
        while ((ret = iomap_iter(&iter, ops)) > 0)
-               iter.processed = iomap_page_mkwrite_iter(&iter, page);
+               iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
 
        if (ret < 0)
                goto out_unlock;
-       wait_for_stable_page(page);
+       folio_wait_stable(folio);
        return VM_FAULT_LOCKED;
 out_unlock:
-       unlock_page(page);
+       folio_unlock(folio);
        return block_page_mkwrite_return(ret);
 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
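
A hypothetical sketch (not part of this commit) of how a filesystem might wire iomap_page_mkwrite() into its ->page_mkwrite handler; the myfs_* names and myfs_iomap_ops are illustrative placeholders, not taken from any in-tree filesystem:

	#include <linux/iomap.h>
	#include <linux/mm.h>

	/* Hypothetical iomap_ops: maps file offsets to extents for this fs. */
	extern const struct iomap_ops myfs_iomap_ops;

	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		/*
		 * iomap_page_mkwrite() locks the faulting folio, lets the
		 * filesystem allocate or COW the covered extents via the ops,
		 * dirties the whole folio, and returns VM_FAULT_LOCKED so the
		 * caller keeps the folio lock.
		 */
		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
	}

	static const struct vm_operations_struct myfs_file_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,
		.page_mkwrite	= myfs_page_mkwrite,
	};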