iomap: Support large folios in invalidatepage
author Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 13 Jan 2021 15:48:49 +0000 (10:48 -0500)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 18 Dec 2021 05:06:08 +0000 (00:06 -0500)
If we're punching a hole in a large folio, we need to remove the
per-folio iomap data as the folio is about to be split and each page will
need its own.  If a dirty folio is only partially uptodate, the iomap
data records which blocks cannot be written back, so assert that a
dirty folio is fully uptodate.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
fs/iomap/buffered-io.c

index ed796055e57800fa610ce60480135d8ccdfdaa94..ba80bedd9590c5d6ffc541d4d5f0f12946738df3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -481,13 +481,18 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
        trace_iomap_invalidatepage(folio->mapping->host, offset, len);
 
        /*
-        * If we're invalidating the entire page, clear the dirty state from it
-        * and release it to avoid unnecessary buildup of the LRU.
+        * If we're invalidating the entire folio, clear the dirty state
+        * from it and release it to avoid unnecessary buildup of the LRU.
         */
        if (offset == 0 && len == folio_size(folio)) {
                WARN_ON_ONCE(folio_test_writeback(folio));
                folio_cancel_dirty(folio);
                iomap_page_release(folio);
+       } else if (folio_test_large(folio)) {
+               /* Must release the iop so the page can be split */
+               WARN_ON_ONCE(!folio_test_uptodate(folio) &&
+                            folio_test_dirty(folio));
+               iomap_page_release(folio);
        }
 }
 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
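
For readability, this is how the whole of iomap_invalidate_folio() reads
once the hunk is applied. It is a sketch assembled from the hunk above:
the signature comes from the @@ header and the opening brace is assumed,
so it is not a verbatim copy of the tree.

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
        trace_iomap_invalidatepage(folio->mapping->host, offset, len);

        /*
         * If we're invalidating the entire folio, clear the dirty state
         * from it and release it to avoid unnecessary buildup of the LRU.
         */
        if (offset == 0 && len == folio_size(folio)) {
                WARN_ON_ONCE(folio_test_writeback(folio));
                folio_cancel_dirty(folio);
                iomap_page_release(folio);
        } else if (folio_test_large(folio)) {
                /* Must release the iop so the page can be split */
                WARN_ON_ONCE(!folio_test_uptodate(folio) &&
                             folio_test_dirty(folio));
                iomap_page_release(folio);
        }
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

The new branch only fires for a partial invalidation of a large folio:
releasing the per-folio iomap data there is what allows the folio to be
split, and the WARN_ON_ONCE encodes the invariant from the commit message
that a dirty folio must be fully uptodate before that per-block state can
be discarded.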