btrfs: rename btrfs_folio_(set|start|end)_writer_lock()
author    Qu Wenruo <wqu@suse.com>
Wed, 9 Oct 2024 05:51:07 +0000 (16:21 +1030)
committer David Sterba <dsterba@suse.com>
Mon, 11 Nov 2024 13:34:18 +0000 (14:34 +0100)
Since there are no more users of the reader locks, rename the writer locks
to more generic names by removing the "_writer" part from the names.

Also rename btrfs_subpage::writers to btrfs_subpage::nr_locked.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/compression.c
fs/btrfs/extent_io.c
fs/btrfs/subpage.c
fs/btrfs/subpage.h
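
In summary, the renames in this patch (taken from the diff below):

    btrfs_folio_set_writer_lock()        -> btrfs_folio_set_lock()
    btrfs_folio_end_writer_lock()        -> btrfs_folio_end_lock()
    btrfs_folio_end_writer_lock_bitmap() -> btrfs_folio_end_lock_bitmap()
    btrfs_subpage_end_and_test_writer()  -> btrfs_subpage_end_and_test_lock()
    btrfs_subpage::writers               -> btrfs_subpage::nr_locked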

fs/btrfs/compression.c
index e51d55bd1a29b9733c8db139f26dfb2fdf8dcd2c..21d73ea594a9993ca1fcf68dec60ec7d436ef602 100644
@@ -545,7 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                 * subpage::readers and to unlock the page.
                 */
                if (fs_info->sectorsize < PAGE_SIZE)
-                       btrfs_folio_set_writer_lock(fs_info, folio, cur, add_size);
+                       btrfs_folio_set_lock(fs_info, folio, cur, add_size);
                folio_put(folio);
                cur += add_size;
        }
fs/btrfs/extent_io.c
index 3eb9ed5d55f26a5008a64ec8acba18d717572295..1beaba23253242b73ae1598178dc4ac9a254d8bf 100644
@@ -190,7 +190,7 @@ static void process_one_folio(struct btrfs_fs_info *fs_info,
                btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
 
        if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
-               btrfs_folio_end_writer_lock(fs_info, folio, start, len);
+               btrfs_folio_end_lock(fs_info, folio, start, len);
 }
 
 static void __process_folios_contig(struct address_space *mapping,
@@ -276,7 +276,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
                        range_start = max_t(u64, folio_pos(folio), start);
                        range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
                                          end + 1) - range_start;
-                       btrfs_folio_set_writer_lock(fs_info, folio, range_start, range_len);
+                       btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
 
                        processed_end = range_start + range_len - 1;
                }
@@ -438,7 +438,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
        if (!btrfs_is_subpage(fs_info, folio->mapping))
                folio_unlock(folio);
        else
-               btrfs_folio_end_writer_lock(fs_info, folio, start, len);
+               btrfs_folio_end_lock(fs_info, folio, start, len);
 }
 
 /*
@@ -495,7 +495,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
                return;
 
        ASSERT(folio_test_private(folio));
-       btrfs_folio_set_writer_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+       btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
 }
 
 /*
@@ -1184,7 +1184,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
        for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
                u64 start = page_start + (bit << fs_info->sectorsize_bits);
 
-               btrfs_folio_set_writer_lock(fs_info, folio, start, fs_info->sectorsize);
+               btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
        }
 
        /* Lock all (subpage) delalloc ranges inside the folio first. */
@@ -1520,7 +1520,7 @@ done:
         * Only unlock ranges that are submitted. As there can be some async
         * submitted ranges inside the folio.
         */
-       btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
+       btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
        ASSERT(ret <= 0);
        return ret;
 }
@@ -2298,7 +2298,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
                                                       cur, cur_len, !ret);
                        mapping_set_error(mapping, ret);
                }
-               btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
+               btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
                if (ret < 0)
                        found_error = true;
 next_page:
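
The _bitmap variant seen in the writepage path above unlocks only the sectors whose bits are set, so ranges still under async submission stay locked. A minimal sketch of a caller, with a hand-built bitmap standing in for bio_ctrl->submit_bitmap (the chosen bits are illustrative, not from the patch):

	unsigned long submitted = 0;

	/* Pretend only the first and third sectors of the folio were submitted. */
	set_bit(0, &submitted);
	set_bit(2, &submitted);

	/* Clear those sectors' locked bits; folio_unlock() runs if they were the last. */
	btrfs_folio_end_lock_bitmap(fs_info, folio, submitted);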
fs/btrfs/subpage.c
index 6c3d54a459564ab3b29460193975994d7b1aa030..d4cab3c557425da779248b2de3a368a8bcd00c9d 100644
@@ -143,7 +143,7 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
        if (type == BTRFS_SUBPAGE_METADATA)
                atomic_set(&ret->eb_refs, 0);
        else
-               atomic_set(&ret->writers, 0);
+               atomic_set(&ret->nr_locked, 0);
        return ret;
 }
 
@@ -237,8 +237,8 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
                             orig_start + orig_len) - *start;
 }
 
-static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
-                                             struct folio *folio, u64 start, u32 len)
+static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
+                                           struct folio *folio, u64 start, u32 len)
 {
        struct btrfs_subpage *subpage = folio_get_private(folio);
        const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
@@ -256,9 +256,9 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
         * extent_clear_unlock_delalloc() for compression path.
         *
         * This @locked_page is locked by plain lock_page(), thus its
-        * subpage::writers is 0.  Handle them in a special way.
+        * subpage::locked is 0.  Handle them in a special way.
         */
-       if (atomic_read(&subpage->writers) == 0) {
+       if (atomic_read(&subpage->nr_locked) == 0) {
                spin_unlock_irqrestore(&subpage->lock, flags);
                return true;
        }
@@ -267,8 +267,8 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
                clear_bit(bit, subpage->bitmaps);
                cleared++;
        }
-       ASSERT(atomic_read(&subpage->writers) >= cleared);
-       last = atomic_sub_and_test(cleared, &subpage->writers);
+       ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
+       last = atomic_sub_and_test(cleared, &subpage->nr_locked);
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;
 }
@@ -289,8 +289,8 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
  *   bitmap, reduce the writer lock number, and unlock the page if that's
  *   the last locked range.
  */
-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len)
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len)
 {
        struct btrfs_subpage *subpage = folio_get_private(folio);
 
@@ -303,24 +303,24 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
 
        /*
         * For subpage case, there are two types of locked page.  With or
-        * without writers number.
+        * without locked number.
         *
-        * Since we own the page lock, no one else could touch subpage::writers
+        * Since we own the page lock, no one else could touch subpage::locked
         * and we are safe to do several atomic operations without spinlock.
         */
-       if (atomic_read(&subpage->writers) == 0) {
-               /* No writers, locked by plain lock_page(). */
+       if (atomic_read(&subpage->nr_locked) == 0) {
+               /* No subpage lock, locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }
 
        btrfs_subpage_clamp_range(folio, &start, &len);
-       if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
+       if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
                folio_unlock(folio);
 }
 
-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
-                                       struct folio *folio, unsigned long bitmap)
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+                                struct folio *folio, unsigned long bitmap)
 {
        struct btrfs_subpage *subpage = folio_get_private(folio);
        const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
@@ -334,8 +334,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
                return;
        }
 
-       if (atomic_read(&subpage->writers) == 0) {
-               /* No writers, locked by plain lock_page(). */
+       if (atomic_read(&subpage->nr_locked) == 0) {
+               /* No subpage lock, locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }
@@ -345,8 +345,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
                if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
                        cleared++;
        }
-       ASSERT(atomic_read(&subpage->writers) >= cleared);
-       last = atomic_sub_and_test(cleared, &subpage->writers);
+       ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
+       last = atomic_sub_and_test(cleared, &subpage->nr_locked);
        spin_unlock_irqrestore(&subpage->lock, flags);
        if (last)
                folio_unlock(folio);
@@ -671,8 +671,8 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
  * This populates the involved subpage ranges so that subpage helpers can
  * properly unlock them.
  */
-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len)
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len)
 {
        struct btrfs_subpage *subpage;
        unsigned long flags;
@@ -691,7 +691,7 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
        /* Target range should not yet be locked. */
        ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
        bitmap_set(subpage->bitmaps, start_bit, nbits);
-       ret = atomic_add_return(nbits, &subpage->writers);
+       ret = atomic_add_return(nbits, &subpage->nr_locked);
        ASSERT(ret <= fs_info->sectors_per_page);
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
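
To make the nr_locked accounting concrete: assuming 4K sectors on a 64K page (sectors_per_page = 16), a hypothetical sequence over a single locked folio behaves as follows (offsets and lengths are illustrative):

	u64 pos = folio_pos(folio);

	btrfs_folio_set_lock(fs_info, folio, pos + 8192, 12288);  /* sectors 2-4, nr_locked = 3 */
	btrfs_folio_set_lock(fs_info, folio, pos + 32768, 4096);  /* sector 8,    nr_locked = 4 */

	btrfs_folio_end_lock(fs_info, folio, pos + 8192, 12288);  /* nr_locked = 1, folio stays locked */
	btrfs_folio_end_lock(fs_info, folio, pos + 32768, 4096);  /* nr_locked = 0, folio_unlock() */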
fs/btrfs/subpage.h
index c150aba9318edd60b9f9d7c0fc84399a3a031d90..428fa9389fd49e30962ff6b7d32feef61c7bf4d4 100644
@@ -54,8 +54,12 @@ struct btrfs_subpage {
                 */
                atomic_t eb_refs;
 
-               /* Structures only used by data */
-               atomic_t writers;
+               /*
+                * Structures only used by data,
+                *
+                * How many sectors inside the page is locked.
+                */
+               atomic_t nr_locked;
        };
        unsigned long bitmaps[];
 };
@@ -87,12 +91,12 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage);
 void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 
-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len);
-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len);
-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
-                                       struct folio *folio, unsigned long bitmap);
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len);
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len);
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+                                struct folio *folio, unsigned long bitmap);
 /*
  * Template for subpage related operations.
  *