iomap: Convert iomap_read_inline_data to take a folio
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Sat, 24 Jul 2021 03:24:50 +0000 (23:24 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Thu, 25 Nov 2021 19:03:03 +0000 (14:03 -0500)
We still only support up to a single page of inline data (at least,
per call to iomap_read_inline_data()), but it can now be written into
the middle of a folio in case we decide to allocate a 16KiB page for
a file that's 8.1KiB in size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
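
As a side note on the arithmetic the commit message describes, here is a small
userspace C model (not kernel code) of how offset_in_folio() differs from
offset_in_page() for the quoted 16KiB-folio / 8.1KiB-file case. The helper
names mirror the kernel ones but are simplified stand-ins that assume a folio
naturally aligned in the file; the concrete byte counts (an 8295-byte file with
tail data at offset 8192) are illustrative assumptions, not values taken from
the commit.

	/*
	 * Userspace model of the offset arithmetic -- not kernel code.
	 * PAGE_SIZE, the 16KiB folio size and the ~8.1KiB file size are
	 * the figures from the commit message; offset_in_page() and
	 * offset_in_folio() below are simplified stand-ins for the kernel
	 * helpers, assuming a naturally aligned folio.
	 */
	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE	4096UL
	#define FOLIO_SIZE	(4 * PAGE_SIZE)		/* 16KiB folio */

	/* offset of @pos within its page */
	static size_t offset_in_page(unsigned long pos)
	{
		return pos & (PAGE_SIZE - 1);
	}

	/* offset of @pos within a naturally aligned folio of FOLIO_SIZE */
	static size_t offset_in_folio(unsigned long pos)
	{
		return pos & (FOLIO_SIZE - 1);
	}

	int main(void)
	{
		unsigned long isize = 8295;	/* ~8.1KiB file (assumed) */
		unsigned long pos = 8192;	/* inline (tail) data starts here (assumed) */
		size_t size = isize - pos;	/* 103 bytes of inline data */

		printf("inline data: %zu bytes at file offset %lu\n", size, pos);
		printf("offset_in_page  = %zu\n", offset_in_page(pos));
		printf("offset_in_folio = %zu\n", offset_in_folio(pos));
		/*
		 * offset_in_page() lands at byte 0 of whichever page you pass,
		 * while offset_in_folio() lands at byte 8192 -- the third page
		 * of the 16KiB folio, i.e. the middle of the folio.
		 */
		return 0;
	}

Built with any C compiler, this prints offset_in_page = 0 but
offset_in_folio = 8192, which is why the diff below can pass a folio-relative
offset straight to kmap_local_folio() instead of doing
kmap_local_page(page) + poff.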
fs/iomap/buffered-io.c

index c7c4ae7356208279ca33b310bb776471df87c0c6..96a404f11a3b9d9f6cb7556cf5ffde1e8e23fcdd 100644
@@ -195,9 +195,8 @@ struct iomap_readpage_ctx {
 };
 
 static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
-               struct page *page)
+               struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        struct iomap_page *iop;
        const struct iomap *iomap = iomap_iter_srcmap(iter);
        size_t size = i_size_read(iter->inode) - iomap->offset;
@@ -205,7 +204,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
        size_t offset = offset_in_folio(folio, iomap->offset);
        void *addr;
 
-       if (PageUptodate(page))
+       if (folio_test_uptodate(folio))
                return PAGE_SIZE - poff;
 
        if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
@@ -220,7 +219,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
        else
                iop = to_iomap_page(folio);
 
-       addr = kmap_local_page(page) + poff;
+       addr = kmap_local_folio(folio, offset);
        memcpy(addr, iomap->inline_data, size);
        memset(addr + size, 0, PAGE_SIZE - poff - size);
        kunmap_local(addr);
@@ -252,7 +251,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
        sector_t sector;
 
        if (iomap->type == IOMAP_INLINE)
-               return min(iomap_read_inline_data(iter, page), length);
+               return min(iomap_read_inline_data(iter, folio), length);
 
        /* zero post-eof blocks as the page may be mapped */
        iop = iomap_page_create(iter->inode, folio);
@@ -586,12 +585,13 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
                struct page *page)
 {
+       struct folio *folio = page_folio(page);
        int ret;
 
        /* needs more work for the tailpacking case; disable for now */
        if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
                return -EIO;
-       ret = iomap_read_inline_data(iter, page);
+       ret = iomap_read_inline_data(iter, folio);
        if (ret < 0)
                return ret;
        return 0;