From acede6a57360ec361943c9eb8c4b8d2387af67e9 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:25 +0100 Subject: [PATCH 01/16] f2fs: Use bio_for_each_folio_all() in __has_merged_page() Iterate over each folio rather than each page. Convert f2fs_compress_control_page() to f2fs_compress_control_folio() since this is the only caller. Removes a reference to page->mapping which is going away soon as well as calls to fscrypt_is_bounce_page() and fscrypt_pagecache_page(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 6 ++++-- fs/f2fs/data.c | 19 +++++++++---------- fs/f2fs/f2fs.h | 4 ++-- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index a01567bbcd33..a4cd957f6ade 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -137,9 +137,11 @@ static void f2fs_put_rpages_wbc(struct compress_ctx *cc, } } -struct page *f2fs_compress_control_page(struct page *page) +struct folio *f2fs_compress_control_folio(struct folio *folio) { - return ((struct compress_io_ctx *)page_private(page))->rpages[0]; + struct compress_io_ctx *ctx = folio->private; + + return page_folio(ctx->rpages[0]); } int f2fs_init_compress_ctx(struct compress_ctx *cc) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 31a2977441c5..5cf4b0517e6c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -548,8 +548,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) static bool __has_merged_page(struct bio *bio, struct inode *inode, struct page *page, nid_t ino) { - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; if (!bio) return false; @@ -557,25 +556,25 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode, if (!inode && !page && !ino) return true; - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *target = bvec->bv_page; + bio_for_each_folio_all(fi, bio) { + struct folio *target = fi.folio; - if (fscrypt_is_bounce_page(target)) { - target = fscrypt_pagecache_page(target); + if (fscrypt_is_bounce_folio(target)) { + target = fscrypt_pagecache_folio(target); if (IS_ERR(target)) continue; } - if (f2fs_is_compressed_page(target)) { - target = f2fs_compress_control_page(target); + if (f2fs_is_compressed_page(&target->page)) { + target = f2fs_compress_control_folio(target); if (IS_ERR(target)) continue; } if (inode && inode == target->mapping->host) return true; - if (page && page == target) + if (page && page == &target->page) return true; - if (ino && ino == ino_of_node(target)) + if (ino && ino == ino_of_node(&target->page)) return true; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index bed74ab426cf..a3e12566105e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -4442,7 +4442,7 @@ enum cluster_check_type { CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */ }; bool f2fs_is_compressed_page(struct page *page); -struct page *f2fs_compress_control_page(struct page *page); +struct folio *f2fs_compress_control_folio(struct folio *folio); int f2fs_prepare_compress_overwrite(struct inode *inode, struct page **pagep, pgoff_t index, void **fsdata); bool f2fs_compress_write_end(struct inode *inode, void *fsdata, @@ -4519,7 +4519,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode) return false; } static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; } -static inline struct page *f2fs_compress_control_page(struct page *page) +static inline struct folio 
*f2fs_compress_control_folio(struct folio *folio) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); -- 2.51.0 From 1db30d82365b3feacc21a1bd9158f7b14e2d0500 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:26 +0100 Subject: [PATCH 02/16] f2fs: Use a folio in add_ipu_page() Convert the incoming page to a folio at the start, then use the folio later in the function. Moves a call to page_folio() earlier, and removes an access to page->mapping. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 5cf4b0517e6c..5759a8d6b926 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -779,6 +779,7 @@ static void del_bio_entry(struct bio_entry *be) static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) { + struct folio *fio_folio = page_folio(fio->page); struct f2fs_sb_info *sbi = fio->sbi; enum temp_type temp; bool found = false; @@ -800,8 +801,8 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, *fio->last_block, fio->new_blkaddr)); if (f2fs_crypt_mergeable_bio(*bio, - fio->page->mapping->host, - page_folio(fio->page)->index, fio) && + fio_folio->mapping->host, + fio_folio->index, fio) && bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) { ret = 0; -- 2.51.0 From 6f8b9318c6eb958f64a985a0af7efc2702f15605 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:27 +0100 Subject: [PATCH 03/16] f2fs: Remove access to page->mapping in f2fs_is_cp_guaranteed() page->mapping will be removed soon, so call page_folio() on the page that was passed in. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 5759a8d6b926..2b3147148cc6 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -49,7 +49,7 @@ void f2fs_destroy_bioset(void) bool f2fs_is_cp_guaranteed(struct page *page) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = page_folio(page)->mapping; struct inode *inode; struct f2fs_sb_info *sbi; -- 2.51.0 From c14b4562bc9bd7cb57dbb95eae73fc3f24cdfadc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:28 +0100 Subject: [PATCH 04/16] f2fs: Use a folio in move_data_block() Fetch a folio from the pagecache instead of a page and operate on it throughout. Removes eight calls to compound_head() and an access to page->mapping. 
Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/gc.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 8b5a55b72264..171b6f9a2fbc 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1307,7 +1307,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	struct dnode_of_data dn;
 	struct f2fs_summary sum;
 	struct node_info ni;
-	struct page *page, *mpage;
+	struct page *page;
+	struct folio *mfolio;
 	block_t newaddr;
 	int err = 0;
 	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
@@ -1359,20 +1360,20 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	if (lfs_mode)
 		f2fs_down_write(&fio.sbi->io_order_lock);
 
-	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+	mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
 					fio.old_blkaddr, false);
-	if (!mpage) {
-		err = -ENOMEM;
+	if (IS_ERR(mfolio)) {
+		err = PTR_ERR(mfolio);
 		goto up_out;
 	}
 
-	fio.encrypted_page = mpage;
+	fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);
 
-	/* read source block in mpage */
-	if (!PageUptodate(mpage)) {
+	/* read source block in mfolio */
+	if (!folio_test_uptodate(mfolio)) {
 		err = f2fs_submit_page_bio(&fio);
 		if (err) {
-			f2fs_put_page(mpage, 1);
+			f2fs_folio_put(mfolio, true);
 			goto up_out;
 		}
 
@@ -1381,11 +1382,11 @@
 		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
 							F2FS_BLKSIZE);
 
-		lock_page(mpage);
-		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
-						!PageUptodate(mpage))) {
+		folio_lock(mfolio);
+		if (unlikely(mfolio->mapping != META_MAPPING(fio.sbi) ||
+						!folio_test_uptodate(mfolio))) {
 			err = -EIO;
-			f2fs_put_page(mpage, 1);
+			f2fs_folio_put(mfolio, true);
 			goto up_out;
 		}
 	}
@@ -1396,7 +1397,7 @@
 	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr,
 					&newaddr, &sum, type, NULL);
 	if (err) {
-		f2fs_put_page(mpage, 1);
+		f2fs_folio_put(mfolio, true);
 		/* filesystem should shutdown, no need to recovery block */
 		goto up_out;
 	}
@@ -1405,15 +1406,15 @@
 				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
 	if (!fio.encrypted_page) {
 		err = -ENOMEM;
-		f2fs_put_page(mpage, 1);
+		f2fs_folio_put(mfolio, true);
 		goto recover_block;
 	}
 
 	/* write target block */
 	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
 	memcpy(page_address(fio.encrypted_page),
-		page_address(mpage), PAGE_SIZE);
-	f2fs_put_page(mpage, 1);
+		folio_address(mfolio), PAGE_SIZE);
+	f2fs_folio_put(mfolio, true);
 
 	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
-- 
2.51.0

From 0d1e687e432bae1ac32c2b8a3799172f94b3f96a Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Mon, 31 Mar 2025 21:10:29 +0100
Subject: [PATCH 05/16] f2fs: Use a folio in f2fs_quota_read()

Support arbitrary size folios and remove a few hidden calls to
compound_head().  Also remove an unnecessary test of the uptodate flag;
if mapping_read_folio_gfp() cannot bring the folio uptodate, it will
return an error.
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 22f26871b7aa..8315016914e8 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2692,12 +2692,9 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, { struct inode *inode = sb_dqopt(sb)->files[type]; struct address_space *mapping = inode->i_mapping; - block_t blkidx = F2FS_BYTES_TO_BLK(off); - int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; loff_t i_size = i_size_read(inode); - struct page *page; if (off > i_size) return 0; @@ -2706,37 +2703,36 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, len = i_size - off; toread = len; while (toread > 0) { - tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); + struct folio *folio; + size_t offset; + repeat: - page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS); - if (IS_ERR(page)) { - if (PTR_ERR(page) == -ENOMEM) { + folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT, + GFP_NOFS); + if (IS_ERR(folio)) { + if (PTR_ERR(folio) == -ENOMEM) { memalloc_retry_wait(GFP_NOFS); goto repeat; } set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); - return PTR_ERR(page); + return PTR_ERR(folio); } + offset = offset_in_folio(folio, off); + tocopy = min(folio_size(folio) - offset, toread); - lock_page(page); + folio_lock(folio); - if (unlikely(page->mapping != mapping)) { - f2fs_put_page(page, 1); + if (unlikely(folio->mapping != mapping)) { + f2fs_folio_put(folio, true); goto repeat; } - if (unlikely(!PageUptodate(page))) { - f2fs_put_page(page, 1); - set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); - return -EIO; - } - memcpy_from_page(data, page, offset, tocopy); - f2fs_put_page(page, 1); + memcpy_from_folio(data, folio, offset, tocopy); + f2fs_folio_put(folio, true); - offset = 0; toread -= tocopy; data += tocopy; - blkidx++; + off += tocopy; } return len; } -- 2.51.0 From b15ca18571578f776782b7347831cf14a57e8018 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:30 +0100 Subject: [PATCH 06/16] f2fs: Add f2fs_grab_meta_folio() Turn f2fs_grab_meta_page() into a wrapper around f2fs_grab_meta_folio(). Saves three hidden calls to compound_head(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 16 ++++++++-------- fs/f2fs/f2fs.h | 8 +++++++- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 85b7141f0d89..7dbd7f33d807 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -38,20 +38,20 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, /* * We guarantee no failure on the returned page. 
*/ -struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) +struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index) { struct address_space *mapping = META_MAPPING(sbi); - struct page *page; + struct folio *folio; repeat: - page = f2fs_grab_cache_page(mapping, index, false); - if (!page) { + folio = f2fs_grab_cache_folio(mapping, index, false); + if (IS_ERR(folio)) { cond_resched(); goto repeat; } - f2fs_wait_on_page_writeback(page, META, true, true); - if (!PageUptodate(page)) - SetPageUptodate(page); - return page; + f2fs_folio_wait_writeback(folio, META, true, true); + if (!folio_test_uptodate(folio)) + folio_mark_uptodate(folio); + return folio; } static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a3e12566105e..394343518dcf 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3893,7 +3893,7 @@ static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, unsigned char reason); void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); -struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); +struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); @@ -3936,6 +3936,12 @@ int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); +static inline +struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) +{ + return &f2fs_grab_meta_folio(sbi, index)->page; +} + /* * data.c */ -- 2.51.0 From 668c7a564823a825a87924c87147d202580c293c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:31 +0100 Subject: [PATCH 07/16] f2fs: Use a folio in commit_checkpoint() Save four calls to compound_head(). Also remove the call to f2fs_wait_on_page_writeback() as this was already done by f2fs_grab_meta_folio() and writeback can't have restarted in the meantime since we hold the folio locked. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 7dbd7f33d807..0305158afc41 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1407,30 +1407,28 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi, }; /* - * filemap_get_folios_tag and lock_page again will take + * filemap_get_folios_tag and folio_lock again will take * some extra time. Therefore, f2fs_update_meta_pages and * f2fs_sync_meta_pages are combined in this function. 
*/ - struct page *page = f2fs_grab_meta_page(sbi, blk_addr); + struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr); int err; - f2fs_wait_on_page_writeback(page, META, true, true); + memcpy(folio_address(folio), src, PAGE_SIZE); - memcpy(page_address(page), src, PAGE_SIZE); - - set_page_dirty(page); - if (unlikely(!clear_page_dirty_for_io(page))) + folio_mark_dirty(folio); + if (unlikely(!folio_clear_dirty_for_io(folio))) f2fs_bug_on(sbi, 1); /* writeout cp pack 2 page */ - err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO); + err = __f2fs_write_meta_page(&folio->page, &wbc, FS_CP_META_IO); if (unlikely(err && f2fs_cp_error(sbi))) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); return; } f2fs_bug_on(sbi, err); - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); /* submit checkpoint (with barrier if NOBARRIER is not set) */ f2fs_submit_merged_write(sbi, META_FLUSH); -- 2.51.0 From a8d397386371cb1c366268402bd48d1a20aa5bf9 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:32 +0100 Subject: [PATCH 08/16] f2fs: Convert __f2fs_write_meta_page() to __f2fs_write_meta_folio() All callers now have a folio so pass it in. Saves three hidden calls to compound_head(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 0305158afc41..5d5cd4ee21fb 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -340,12 +340,11 @@ void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true); } -static int __f2fs_write_meta_page(struct page *page, +static int __f2fs_write_meta_folio(struct folio *folio, struct writeback_control *wbc, enum iostat_type io_type) { - struct f2fs_sb_info *sbi = F2FS_P_SB(page); - struct folio *folio = page_folio(page); + struct f2fs_sb_info *sbi = F2FS_F_SB(folio); trace_f2fs_writepage(folio, META); @@ -367,7 +366,7 @@ static int __f2fs_write_meta_page(struct page *page, dec_page_count(sbi, F2FS_DIRTY_META); if (wbc->for_reclaim) - f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META); + f2fs_submit_merged_write_cond(sbi, NULL, &folio->page, 0, META); folio_unlock(folio); @@ -377,7 +376,7 @@ static int __f2fs_write_meta_page(struct page *page, return 0; redirty_out: - redirty_page_for_writepage(wbc, page); + folio_redirty_for_writepage(wbc, folio); return AOP_WRITEPAGE_ACTIVATE; } @@ -463,7 +462,7 @@ continue_unlock: if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; - if (__f2fs_write_meta_page(&folio->page, &wbc, + if (__f2fs_write_meta_folio(folio, &wbc, io_type)) { folio_unlock(folio); break; @@ -1421,7 +1420,7 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi, f2fs_bug_on(sbi, 1); /* writeout cp pack 2 page */ - err = __f2fs_write_meta_page(&folio->page, &wbc, FS_CP_META_IO); + err = __f2fs_write_meta_folio(folio, &wbc, FS_CP_META_IO); if (unlikely(err && f2fs_cp_error(sbi))) { f2fs_folio_put(folio, true); return; -- 2.51.0 From 46fd261c677e65df36564997f7a5227f3e352de9 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:33 +0100 Subject: [PATCH 09/16] f2fs: Use f2fs_folio_wait_writeback() There were some missing conversions from f2fs_wait_on_page_writeback() to f2fs_folio_wait_writeback(). Saves a call to compound_head() at each callsite. 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 3 +-- fs/f2fs/data.c | 4 ++-- fs/f2fs/file.c | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 5d5cd4ee21fb..f5f0915dde79 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -456,8 +456,7 @@ continue_unlock: goto continue_unlock; } - f2fs_wait_on_page_writeback(&folio->page, META, - true, true); + f2fs_folio_wait_writeback(folio, META, true, true); if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 2b3147148cc6..2e901403000d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3128,7 +3128,7 @@ continue_unlock: if (folio_test_writeback(folio)) { if (wbc->sync_mode == WB_SYNC_NONE) goto continue_unlock; - f2fs_wait_on_page_writeback(&folio->page, DATA, true, true); + f2fs_folio_wait_writeback(folio, DATA, true, true); } if (!folio_clear_dirty_for_io(folio)) @@ -3623,7 +3623,7 @@ repeat: } } - f2fs_wait_on_page_writeback(&folio->page, DATA, false, true); + f2fs_folio_wait_writeback(folio, DATA, false, true); if (len == folio_size(folio) || folio_test_uptodate(folio)) return 0; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index a71946976761..6c8250af129a 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -131,7 +131,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) goto out_sem; } - f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true); + f2fs_folio_wait_writeback(folio, DATA, false, true); /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); -- 2.51.0 From b629c6480ece59d96ae0bc2647bccff58f542be4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:34 +0100 Subject: [PATCH 10/16] f2fs: Pass a folio to f2fs_submit_merged_ipu_write() The only caller which passes a page already has a folio, so pass it in. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 8 ++++---- fs/f2fs/f2fs.h | 2 +- fs/f2fs/segment.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 2e901403000d..dfe4ce31105c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -826,13 +826,13 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, } void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, - struct bio **bio, struct page *page) + struct bio **bio, struct folio *folio) { enum temp_type temp; bool found = false; struct bio *target = bio ? 
*bio : NULL; - f2fs_bug_on(sbi, !target && !page); + f2fs_bug_on(sbi, !target && !folio); for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) { struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; @@ -848,7 +848,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, found = (target == be->bio); else found = __has_merged_page(be->bio, NULL, - page, 0); + &folio->page, 0); if (found) break; } @@ -865,7 +865,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, found = (target == be->bio); else found = __has_merged_page(be->bio, NULL, - page, 0); + &folio->page, 0); if (found) { target = be->bio; del_bio_entry(be); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 394343518dcf..7960a2f4a1e4 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3958,7 +3958,7 @@ void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, nid_t ino, enum page_type type); void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, - struct bio **bio, struct page *page); + struct bio **bio, struct folio *folio); void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); int f2fs_submit_page_bio(struct f2fs_io_info *fio); int f2fs_merge_page_bio(struct f2fs_io_info *fio); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index dcbc38c5c140..76e6fce82fa1 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -4171,7 +4171,7 @@ void f2fs_folio_wait_writeback(struct folio *folio, enum page_type type, /* submit cached LFS IO */ f2fs_submit_merged_write_cond(sbi, NULL, &folio->page, 0, type); /* submit cached IPU IO */ - f2fs_submit_merged_ipu_write(sbi, NULL, &folio->page); + f2fs_submit_merged_ipu_write(sbi, NULL, folio); if (ordered) { folio_wait_writeback(folio); f2fs_bug_on(sbi, locked && folio_test_writeback(folio)); -- 2.51.0 From 9030d55aedf80f7a04077b5916c9bc6ecb3b9b49 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:35 +0100 Subject: [PATCH 11/16] f2fs: Convert __get_meta_page() to __get_meta_folio() Push the conversion to a page into the callers. 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index f5f0915dde79..00de4d06e579 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -54,7 +54,7 @@ repeat: return folio; } -static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, +static struct folio *__get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index, bool is_meta) { struct address_space *mapping = META_MAPPING(sbi); @@ -104,34 +104,34 @@ repeat: return ERR_PTR(-EIO); } out: - return &folio->page; + return folio; } struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) { - return __get_meta_page(sbi, index, true); + return &__get_meta_folio(sbi, index, true)->page; } struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index) { - struct page *page; + struct folio *folio; int count = 0; retry: - page = __get_meta_page(sbi, index, true); - if (IS_ERR(page)) { - if (PTR_ERR(page) == -EIO && + folio = __get_meta_folio(sbi, index, true); + if (IS_ERR(folio)) { + if (PTR_ERR(folio) == -EIO && ++count <= DEFAULT_RETRY_IO_COUNT) goto retry; f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE); } - return page; + return &folio->page; } /* for POR only */ struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) { - return __get_meta_page(sbi, index, false); + return &__get_meta_folio(sbi, index, false)->page; } static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, -- 2.51.0 From 937d6a4d2c21bf02d10967d1f6e2d2afe61c99c2 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:36 +0100 Subject: [PATCH 12/16] f2fs: Convert f2fs_get_tmp_page() to f2fs_get_tmp_folio() Convert all the callers to receive a folio. Removes a lot of hidden calls to compound_head() in f2fs_put_page(). 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 4 +-- fs/f2fs/f2fs.h | 2 +- fs/f2fs/node.c | 10 +++--- fs/f2fs/recovery.c | 86 ++++++++++++++++++++++---------------------- 4 files changed, 52 insertions(+), 50 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 00de4d06e579..1820cb14f716 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -129,9 +129,9 @@ retry: } /* for POR only */ -struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) +struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index) { - return &__get_meta_folio(sbi, index, false)->page; + return __get_meta_folio(sbi, index, false); } static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 7960a2f4a1e4..3e9cb3206bba 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3896,7 +3896,7 @@ void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); -struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); +struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index); bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 3f6f5e54759c..3ca321fa7495 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2900,17 +2900,17 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true); for (idx = addr; idx < addr + nrpages; idx++) { - struct page *page = f2fs_get_tmp_page(sbi, idx); + struct folio *folio = f2fs_get_tmp_folio(sbi, idx); - if (IS_ERR(page)) - return PTR_ERR(page); + if (IS_ERR(folio)) + return PTR_ERR(folio); - rn = F2FS_NODE(page); + rn = F2FS_NODE(&folio->page); sum_entry->nid = rn->footer.nid; sum_entry->version = 0; sum_entry->ofs_in_node = 0; sum_entry++; - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); } invalidate_mapping_pages(META_MAPPING(sbi), addr, diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 69a2027e3ebc..9848f0516a7e 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -358,33 +358,34 @@ static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr, block_t *blkaddr_fast, bool *is_detecting) { unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS; - struct page *page = NULL; int i; if (!*is_detecting) return 0; for (i = 0; i < 2; i++) { + struct folio *folio; + if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) { *is_detecting = false; return 0; } - page = f2fs_get_tmp_page(sbi, *blkaddr_fast); - if (IS_ERR(page)) - return PTR_ERR(page); + folio = f2fs_get_tmp_folio(sbi, *blkaddr_fast); + if (IS_ERR(folio)) + return PTR_ERR(folio); - if (!is_recoverable_dnode(page)) { - f2fs_put_page(page, 1); + if (!is_recoverable_dnode(&folio->page)) { + f2fs_folio_put(folio, true); *is_detecting = false; return 0; } ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast, - next_blkaddr_of_node(page)); + next_blkaddr_of_node(&folio->page)); - *blkaddr_fast = next_blkaddr_of_node(page); - f2fs_put_page(page, 1); + *blkaddr_fast = next_blkaddr_of_node(&folio->page); + f2fs_folio_put(folio, true); f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, 
ra_blocks); } @@ -401,7 +402,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, bool check_only) { struct curseg_info *curseg; - struct page *page = NULL; block_t blkaddr, blkaddr_fast; bool is_detecting = true; int err = 0; @@ -413,33 +413,35 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, while (1) { struct fsync_inode_entry *entry; + struct folio *folio; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) return 0; - page = f2fs_get_tmp_page(sbi, blkaddr); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_get_tmp_folio(sbi, blkaddr); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); break; } - if (!is_recoverable_dnode(page)) { - f2fs_put_page(page, 1); + if (!is_recoverable_dnode(&folio->page)) { + f2fs_folio_put(folio, true); break; } - if (!is_fsync_dnode(page)) + if (!is_fsync_dnode(&folio->page)) goto next; - entry = get_fsync_inode(head, ino_of_node(page)); + entry = get_fsync_inode(head, ino_of_node(&folio->page)); if (!entry) { bool quota_inode = false; if (!check_only && - IS_INODE(page) && is_dent_dnode(page)) { - err = f2fs_recover_inode_page(sbi, page); + IS_INODE(&folio->page) && + is_dent_dnode(&folio->page)) { + err = f2fs_recover_inode_page(sbi, &folio->page); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } quota_inode = true; @@ -449,24 +451,24 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, * CP | dnode(F) | inode(DF) * For this case, we should not give up now. */ - entry = add_fsync_inode(sbi, head, ino_of_node(page), + entry = add_fsync_inode(sbi, head, ino_of_node(&folio->page), quota_inode); if (IS_ERR(entry)) { err = PTR_ERR(entry); if (err == -ENOENT) goto next; - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } } entry->blkaddr = blkaddr; - if (IS_INODE(page) && is_dent_dnode(page)) + if (IS_INODE(&folio->page) && is_dent_dnode(&folio->page)) entry->last_dentry = blkaddr; next: /* check next segment */ - blkaddr = next_blkaddr_of_node(page); - f2fs_put_page(page, 1); + blkaddr = next_blkaddr_of_node(&folio->page); + f2fs_folio_put(folio, true); err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast, &is_detecting); @@ -773,7 +775,6 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, struct list_head *tmp_inode_list, struct list_head *dir_list) { struct curseg_info *curseg; - struct page *page = NULL; int err = 0; block_t blkaddr; unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS; @@ -784,22 +785,23 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, while (1) { struct fsync_inode_entry *entry; + struct folio *folio; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) break; - page = f2fs_get_tmp_page(sbi, blkaddr); - if (IS_ERR(page)) { - err = PTR_ERR(page); + folio = f2fs_get_tmp_folio(sbi, blkaddr); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); break; } - if (!is_recoverable_dnode(page)) { - f2fs_put_page(page, 1); + if (!is_recoverable_dnode(&folio->page)) { + f2fs_folio_put(folio, true); break; } - entry = get_fsync_inode(inode_list, ino_of_node(page)); + entry = get_fsync_inode(inode_list, ino_of_node(&folio->page)); if (!entry) goto next; /* @@ -807,23 +809,23 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, * In this case, we can lose the latest inode(x). * So, call recover_inode for the inode update. 
*/ - if (IS_INODE(page)) { - err = recover_inode(entry->inode, page); + if (IS_INODE(&folio->page)) { + err = recover_inode(entry->inode, &folio->page); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } } if (entry->last_dentry == blkaddr) { - err = recover_dentry(entry->inode, page, dir_list); + err = recover_dentry(entry->inode, &folio->page, dir_list); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } } - err = do_recover_data(sbi, entry->inode, page); + err = do_recover_data(sbi, entry->inode, &folio->page); if (err) { - f2fs_put_page(page, 1); + f2fs_folio_put(folio, true); break; } @@ -831,11 +833,11 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, list_move_tail(&entry->list, tmp_inode_list); next: ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr, - next_blkaddr_of_node(page)); + next_blkaddr_of_node(&folio->page)); /* check next segment */ - blkaddr = next_blkaddr_of_node(page); - f2fs_put_page(page, 1); + blkaddr = next_blkaddr_of_node(&folio->page); + f2fs_folio_put(folio, true); f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks); } -- 2.51.0 From f24f7f8cd6e8ff21ad40677f26f8c73a7d855843 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:37 +0100 Subject: [PATCH 13/16] f2fs: Pass a folio to next_blkaddr_of_node() Pass the folio into sanity_check_node_footer() so that we can pass it further into next_blkaddr_of_node(). Removes a lot of conversions from folio->page. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 8 +++++--- fs/f2fs/node.h | 4 ++-- fs/f2fs/recovery.c | 10 +++++----- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 3ca321fa7495..8a8d4d9d9b05 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1457,9 +1457,11 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) } static int sanity_check_node_footer(struct f2fs_sb_info *sbi, - struct page *page, pgoff_t nid, + struct folio *folio, pgoff_t nid, enum node_type ntype) { + struct page *page = &folio->page; + if (unlikely(nid != nid_of_node(page) || (ntype == NODE_TYPE_INODE && !IS_INODE(page)) || (ntype == NODE_TYPE_XATTR && @@ -1469,7 +1471,7 @@ static int sanity_check_node_footer(struct f2fs_sb_info *sbi, "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", ntype, nid, nid_of_node(page), ino_of_node(page), ofs_of_node(page), cpver_of_node(page), - next_blkaddr_of_node(page)); + next_blkaddr_of_node(folio)); set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER); return -EFSCORRUPTED; @@ -1519,7 +1521,7 @@ repeat: goto out_err; } page_hit: - err = sanity_check_node_footer(sbi, &folio->page, nid, ntype); + err = sanity_check_node_footer(sbi, folio, nid, ntype); if (!err) return folio; out_err: diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 103a437e6425..c58ff16f1227 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -268,9 +268,9 @@ static inline __u64 cpver_of_node(struct page *node_page) return le64_to_cpu(rn->footer.cp_ver); } -static inline block_t next_blkaddr_of_node(struct page *node_page) +static inline block_t next_blkaddr_of_node(struct folio *node_folio) { - struct f2fs_node *rn = F2FS_NODE(node_page); + struct f2fs_node *rn = F2FS_NODE(&node_folio->page); return le32_to_cpu(rn->footer.next_blkaddr); } diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 9848f0516a7e..a29bd82de93b 100644 --- a/fs/f2fs/recovery.c +++ 
b/fs/f2fs/recovery.c @@ -382,9 +382,9 @@ static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr, } ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast, - next_blkaddr_of_node(&folio->page)); + next_blkaddr_of_node(folio)); - *blkaddr_fast = next_blkaddr_of_node(&folio->page); + *blkaddr_fast = next_blkaddr_of_node(folio); f2fs_folio_put(folio, true); f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks); @@ -467,7 +467,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, entry->last_dentry = blkaddr; next: /* check next segment */ - blkaddr = next_blkaddr_of_node(&folio->page); + blkaddr = next_blkaddr_of_node(folio); f2fs_folio_put(folio, true); err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast, @@ -833,10 +833,10 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, list_move_tail(&entry->list, tmp_inode_list); next: ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr, - next_blkaddr_of_node(&folio->page)); + next_blkaddr_of_node(folio)); /* check next segment */ - blkaddr = next_blkaddr_of_node(&folio->page); + blkaddr = next_blkaddr_of_node(folio); f2fs_folio_put(folio, true); f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks); -- 2.51.0 From 95e3117621e9373cdf39b88a7a4a8f353428d111 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:38 +0100 Subject: [PATCH 14/16] f2fs: Use a folio in f2fs_ra_meta_pages() Remove three hidden calls to compound_head(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 1820cb14f716..ad3fef66ee54 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -252,7 +252,6 @@ bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type, bool sync) { - struct page *page; block_t blkno = start; struct f2fs_io_info fio = { .sbi = sbi, @@ -271,6 +270,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, blk_start_plug(&plug); for (; nrpages-- > 0; blkno++) { + struct folio *folio; if (!f2fs_is_valid_blkaddr(sbi, blkno, type)) goto out; @@ -300,18 +300,18 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, BUG(); } - page = f2fs_grab_cache_page(META_MAPPING(sbi), + folio = f2fs_grab_cache_folio(META_MAPPING(sbi), fio.new_blkaddr, false); - if (!page) + if (IS_ERR(folio)) continue; - if (PageUptodate(page)) { - f2fs_put_page(page, 1); + if (folio_test_uptodate(folio)) { + f2fs_folio_put(folio, true); continue; } - fio.page = page; + fio.page = &folio->page; err = f2fs_submit_page_bio(&fio); - f2fs_put_page(page, err ? 1 : 0); + f2fs_folio_put(folio, err ? true : false); if (!err) f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, -- 2.51.0 From 2525a784737b2fb7c23fe3d2efa21658100419e4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:39 +0100 Subject: [PATCH 15/16] f2fs: Use a folio in f2fs_ra_meta_pages_cond() Remove a call to find_get_page(). Saves two hidden calls to compound_head(). Change f2fs_folio_put() to check for IS_ERR_OR_NULL to handle the case where we got an error pointer back from filemap_get_folio(). 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 8 ++++---- fs/f2fs/f2fs.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index ad3fef66ee54..3fa66bfba100 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -325,16 +325,16 @@ out: void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, unsigned int ra_blocks) { - struct page *page; + struct folio *folio; bool readahead = false; if (ra_blocks == RECOVERY_MIN_RA_BLOCKS) return; - page = find_get_page(META_MAPPING(sbi), index); - if (!page || !PageUptodate(page)) + folio = filemap_get_folio(META_MAPPING(sbi), index); + if (IS_ERR(folio) || !folio_test_uptodate(folio)) readahead = true; - f2fs_put_page(page, 0); + f2fs_folio_put(folio, false); if (readahead) f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 3e9cb3206bba..9915f31ee2d1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2885,7 +2885,7 @@ static inline struct page *f2fs_pagecache_get_page( static inline void f2fs_folio_put(struct folio *folio, bool unlock) { - if (!folio) + if (IS_ERR_OR_NULL(folio)) return; if (unlock) { -- 2.51.0 From 643d16687d7a0fc072ba73c27f5ebc511c5a684d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 31 Mar 2025 21:10:40 +0100 Subject: [PATCH 16/16] f2fs: Use a folio in write_orphan_inodes() Call f2fs_grab_meta_folio() instead of f2fs_grab_meta_page(). Removes four hidden calls to compound_head(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 3fa66bfba100..6896e769b60b 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -784,7 +784,7 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) unsigned int nentries = 0; unsigned short index = 1; unsigned short orphan_blocks; - struct page *page = NULL; + struct folio *folio = NULL; struct ino_entry *orphan = NULL; struct inode_management *im = &sbi->im[ORPHAN_INO]; @@ -799,10 +799,9 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) /* loop for each orphan inode entry and write them in journal block */ list_for_each_entry(orphan, head, list) { - if (!page) { - page = f2fs_grab_meta_page(sbi, start_blk++); - orphan_blk = - (struct f2fs_orphan_block *)page_address(page); + if (!folio) { + folio = f2fs_grab_meta_folio(sbi, start_blk++); + orphan_blk = folio_address(folio); memset(orphan_blk, 0, sizeof(*orphan_blk)); } @@ -817,20 +816,20 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); index++; nentries = 0; - page = NULL; + folio = NULL; } } - if (page) { + if (folio) { orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); - set_page_dirty(page); - f2fs_put_page(page, 1); + folio_mark_dirty(folio); + f2fs_folio_put(folio, true); } } -- 2.51.0
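
A recurring pattern in this series is switching callers from NULL-checking
page lookups (f2fs_grab_cache_page(), find_get_page()) to folio lookups
that return ERR_PTR() on failure, with f2fs_folio_put() taught to tolerate
IS_ERR_OR_NULL() in patch 15. The sketch below is a minimal userspace model
of that error-pointer convention, not part of the series: the err.h-style
macros are simplified stand-ins for the kernel's <linux/err.h>, and the
demo_* names are invented for illustration only.

```c
/*
 * Minimal userspace sketch of the ERR_PTR()/IS_ERR_OR_NULL() convention
 * this series converts f2fs callers to.  The macros below are simplified
 * stand-ins for <linux/err.h>; demo_* names are invented, not f2fs APIs.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

struct demo_folio { int index; };

/* Folio-style lookup: returns an error pointer on failure, never NULL. */
static struct demo_folio *demo_grab_folio(int index, int fail)
{
	struct demo_folio *folio;

	if (fail)
		return ERR_PTR(-ENOMEM);
	folio = malloc(sizeof(*folio));
	if (!folio)
		return ERR_PTR(-ENOMEM);
	folio->index = index;
	return folio;
}

/* Mirrors the patch 15 change: putting an error pointer is a no-op. */
static void demo_folio_put(struct demo_folio *folio)
{
	if (IS_ERR_OR_NULL(folio))
		return;
	free(folio);
}

int main(void)
{
	struct demo_folio *folio = demo_grab_folio(3, /* fail */ 1);

	if (IS_ERR(folio))	/* callers check IS_ERR(), not NULL */
		printf("lookup failed: %ld\n", PTR_ERR(folio));
	demo_folio_put(folio);	/* safe even on failure */

	folio = demo_grab_folio(3, 0);
	if (!IS_ERR(folio))
		printf("got folio index %d\n", folio->index);
	demo_folio_put(folio);
	return 0;
}
```

Returning an error pointer instead of NULL is what lets a caller such as
f2fs_ra_meta_pages_cond() (patch 15) hand the result of filemap_get_folio()
straight to f2fs_folio_put() without a separate NULL or IS_ERR check.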