www.infradead.org Git - linux.git/commitdiff
btrfs: convert wait_subpage_spinlock() to only use a folio
author Josef Bacik <josef@toxicpanda.com>
Thu, 25 Jul 2024 00:20:24 +0000 (20:20 -0400)
committer David Sterba <dsterba@suse.com>
Tue, 10 Sep 2024 14:51:16 +0000 (16:51 +0200)
Currently this already uses a folio for most things; update it to take a
folio and replace all remaining page usage with the corresponding folio usage.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
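
The conversion follows the usual page-to-folio pattern: helpers that derive
state from a struct page are switched to their folio equivalents, and a caller
that still holds a page wraps it with page_folio(). A minimal sketch of the
mapping, assuming only the helpers visible in this diff (page_to_fs_info(),
folio_to_fs_info(), page_folio()):

	/* Before: derive fs_info and the mapping from the page. */
	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
	struct address_space *mapping = page->mapping;

	/*
	 * After: operate on the folio directly; a caller that still has a
	 * page converts it first with page_folio(page).
	 */
	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
	struct address_space *mapping = folio->mapping;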
fs/btrfs/inode.c

index 05e74ef068968e15161c1e9d7df238ef28ef9a34..0e5db913d6bb47861f295e9edd24eeb3c2eac986 100644 (file)
@@ -7187,13 +7187,12 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
  * for subpage spinlock.  So this function is to spin and wait for subpage
  * spinlock.
  */
-static void wait_subpage_spinlock(struct page *page)
+static void wait_subpage_spinlock(struct folio *folio)
 {
-       struct btrfs_fs_info *fs_info = page_to_fs_info(page);
-       struct folio *folio = page_folio(page);
+       struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
        struct btrfs_subpage *subpage;
 
-       if (!btrfs_is_subpage(fs_info, page->mapping))
+       if (!btrfs_is_subpage(fs_info, folio->mapping))
                return;
 
        ASSERT(folio_test_private(folio) && folio_get_private(folio));
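
For context, the converted helper would read roughly as below once the patch
is applied. The hunk ends at the ASSERT, so the spin-wait itself is not shown
here; it is assumed to be the take-and-release of subpage->lock that the
comment above the function describes:

	static void wait_subpage_spinlock(struct folio *folio)
	{
		struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
		struct btrfs_subpage *subpage;

		if (!btrfs_is_subpage(fs_info, folio->mapping))
			return;

		ASSERT(folio_test_private(folio) && folio_get_private(folio));
		subpage = folio_get_private(folio);

		/*
		 * Acquire and immediately release the subpage spinlock so any
		 * current holder has finished before the folio is torn down
		 * (assumed body; not part of the hunk above).
		 */
		spin_lock_irq(&subpage->lock);
		spin_unlock_irq(&subpage->lock);
	}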
@@ -7223,7 +7222,7 @@ static int btrfs_launder_folio(struct folio *folio)
 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
        if (try_release_extent_mapping(&folio->page, gfp_flags)) {
-               wait_subpage_spinlock(&folio->page);
+               wait_subpage_spinlock(folio);
                clear_page_extent_mapped(&folio->page);
                return true;
        }
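
With the new signature, a call site that only has a struct page would convert
it before calling the helper rather than the helper doing the conversion
internally. A hypothetical caller for illustration (example_wait() is not a
real function; page_folio() appears in the removed hunk above):

	/* Hypothetical caller that only holds a page. */
	static void example_wait(struct page *page)
	{
		struct folio *folio = page_folio(page);

		wait_subpage_spinlock(folio);
	}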
@@ -7284,7 +7283,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
         * do double ordered extent accounting on the same folio.
         */
        folio_wait_writeback(folio);
-       wait_subpage_spinlock(&folio->page);
+       wait_subpage_spinlock(folio);
 
        /*
         * For subpage case, we have call sites like