btrfs: move btrfs_destroy_delayed_refs() to delayed-ref.c
author    Filipe Manana <fdmanana@suse.com>
          Thu, 17 Oct 2024 15:23:41 +0000 (16:23 +0100)
committer David Sterba <dsterba@suse.com>
          Mon, 11 Nov 2024 13:34:19 +0000 (14:34 +0100)
The function is better suited in delayed-ref.c, since it deals with
delayed refs and contains the logic to iterate over them (using the
red-black tree, doing all the locking, freeing, etc). So move it out of
disk-io.c, which is already pretty big, and into delayed-ref.c, hiding
the implementation details of how delayed refs are tracked and managed.
This also facilitates the next patches in the series.

This change moves the code between files but also does the following
simple cleanups:

1) Rename the 'cache' variable to 'bg', since it's a block group
   (the 'cache' naming comes from the old days when the block group
   structure was named 'btrfs_block_group_cache');

2) Move the 'ref' variable declaration into the scope of the inner
   while loop, since it's not used outside that loop (see the sketch
   after this list).
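
Purely to illustrate that second cleanup, here is a minimal,
self-contained C sketch (this is not btrfs code; the 'node' type and
'drain_list()' helper are made up for the example). It shows the
loop-local pointer declared inside the loop that uses it, rather than
at function scope:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int value;
        struct node *next;
};

/*
 * Drain and free the whole list. The 'n' pointer is declared inside
 * the while loop, the same scope-narrowing applied to 'ref' below.
 */
static void drain_list(struct node **head)
{
        while (*head != NULL) {
                struct node *n = *head; /* declared where it is used */

                *head = n->next;
                printf("freeing node %d\n", n->value);
                free(n);
        }
}

int main(void)
{
        struct node *head = NULL;

        /* Build a small list: 2 -> 1 -> 0. */
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        abort();
                n->value = i;
                n->next = head;
                head = n;
        }

        drain_list(&head);
        return 0;
}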

Reviewed-by: Boris Burkov <boris@bur.io>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 1684857554c609059100e9e59becdf3e2afc5613..9e661f9a71b0a51a9cd60d5bd82513a028e4913a 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -9,6 +9,7 @@
 #include "messages.h"
 #include "ctree.h"
 #include "delayed-ref.h"
+#include "extent-tree.h"
 #include "transaction.h"
 #include "qgroup.h"
 #include "space-info.h"
@@ -1238,6 +1239,86 @@ bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
        return found;
 }
 
+void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+                               struct btrfs_fs_info *fs_info)
+{
+       struct rb_node *node;
+       struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
+
+       spin_lock(&delayed_refs->lock);
+       while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
+               struct btrfs_delayed_ref_head *head;
+               struct rb_node *n;
+               bool pin_bytes = false;
+
+               head = rb_entry(node, struct btrfs_delayed_ref_head,
+                               href_node);
+               if (btrfs_delayed_ref_lock(delayed_refs, head))
+                       continue;
+
+               spin_lock(&head->lock);
+               while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
+                       struct btrfs_delayed_ref_node *ref;
+
+                       ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
+                       rb_erase_cached(&ref->ref_node, &head->ref_tree);
+                       RB_CLEAR_NODE(&ref->ref_node);
+                       if (!list_empty(&ref->add_list))
+                               list_del(&ref->add_list);
+                       atomic_dec(&delayed_refs->num_entries);
+                       btrfs_put_delayed_ref(ref);
+                       btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+               }
+               if (head->must_insert_reserved)
+                       pin_bytes = true;
+               btrfs_free_delayed_extent_op(head->extent_op);
+               btrfs_delete_ref_head(delayed_refs, head);
+               spin_unlock(&head->lock);
+               spin_unlock(&delayed_refs->lock);
+               mutex_unlock(&head->mutex);
+
+               if (pin_bytes) {
+                       struct btrfs_block_group *bg;
+
+                       bg = btrfs_lookup_block_group(fs_info, head->bytenr);
+                       if (WARN_ON_ONCE(bg == NULL)) {
+                               /*
+                                * Unexpected and there's nothing we can do here
+                                * because we are in a transaction abort path,
+                                * so any errors can only be ignored or reported
+                                * while attempting to cleanup all resources.
+                                */
+                               btrfs_err(fs_info,
+"block group for delayed ref at %llu was not found while destroying ref head",
+                                         head->bytenr);
+                       } else {
+                               spin_lock(&bg->space_info->lock);
+                               spin_lock(&bg->lock);
+                               bg->pinned += head->num_bytes;
+                               btrfs_space_info_update_bytes_pinned(fs_info,
+                                                                    bg->space_info,
+                                                                    head->num_bytes);
+                               bg->reserved -= head->num_bytes;
+                               bg->space_info->bytes_reserved -= head->num_bytes;
+                               spin_unlock(&bg->lock);
+                               spin_unlock(&bg->space_info->lock);
+
+                               btrfs_put_block_group(bg);
+                       }
+
+                       btrfs_error_unpin_extent_range(fs_info, head->bytenr,
+                               head->bytenr + head->num_bytes - 1);
+               }
+               btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
+               btrfs_put_delayed_ref_head(head);
+               cond_resched();
+               spin_lock(&delayed_refs->lock);
+       }
+       btrfs_qgroup_destroy_extent_records(trans);
+
+       spin_unlock(&delayed_refs->lock);
+}
+
 void __cold btrfs_delayed_ref_exit(void)
 {
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 352921e76c7424a4724ed537b2cc003d190be398..ccc040f9426484194b9a172bb7df4fdc7a896b22 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -399,6 +399,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
 bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
                                 u64 root, u64 parent);
+void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+                               struct btrfs_fs_info *fs_info);
 
 static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
 {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 47598e525ea51074751bce91c3e48e3c5cf1e10e..f5d30c04ba6643fc55101fb482f0c515240c5a0f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4529,86 +4529,6 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
        btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 }
 
-static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
-                                      struct btrfs_fs_info *fs_info)
-{
-       struct rb_node *node;
-       struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
-       struct btrfs_delayed_ref_node *ref;
-
-       spin_lock(&delayed_refs->lock);
-       while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
-               struct btrfs_delayed_ref_head *head;
-               struct rb_node *n;
-               bool pin_bytes = false;
-
-               head = rb_entry(node, struct btrfs_delayed_ref_head,
-                               href_node);
-               if (btrfs_delayed_ref_lock(delayed_refs, head))
-                       continue;
-
-               spin_lock(&head->lock);
-               while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
-                       ref = rb_entry(n, struct btrfs_delayed_ref_node,
-                                      ref_node);
-                       rb_erase_cached(&ref->ref_node, &head->ref_tree);
-                       RB_CLEAR_NODE(&ref->ref_node);
-                       if (!list_empty(&ref->add_list))
-                               list_del(&ref->add_list);
-                       atomic_dec(&delayed_refs->num_entries);
-                       btrfs_put_delayed_ref(ref);
-                       btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
-               }
-               if (head->must_insert_reserved)
-                       pin_bytes = true;
-               btrfs_free_delayed_extent_op(head->extent_op);
-               btrfs_delete_ref_head(delayed_refs, head);
-               spin_unlock(&head->lock);
-               spin_unlock(&delayed_refs->lock);
-               mutex_unlock(&head->mutex);
-
-               if (pin_bytes) {
-                       struct btrfs_block_group *cache;
-
-                       cache = btrfs_lookup_block_group(fs_info, head->bytenr);
-                       if (WARN_ON_ONCE(cache == NULL)) {
-                               /*
-                                * Unexpected and there's nothing we can do here
-                                * because we are in a transaction abort path,
-                                * so any errors can only be ignored or reported
-                                * while attempting to cleanup all resources.
-                                */
-                               btrfs_err(fs_info,
-"block group for delayed ref at %llu was not found while destroying ref head",
-                                         head->bytenr);
-                       } else {
-                               spin_lock(&cache->space_info->lock);
-                               spin_lock(&cache->lock);
-                               cache->pinned += head->num_bytes;
-                               btrfs_space_info_update_bytes_pinned(fs_info,
-                                                                    cache->space_info,
-                                                                    head->num_bytes);
-                               cache->reserved -= head->num_bytes;
-                               cache->space_info->bytes_reserved -= head->num_bytes;
-                               spin_unlock(&cache->lock);
-                               spin_unlock(&cache->space_info->lock);
-
-                               btrfs_put_block_group(cache);
-                       }
-
-                       btrfs_error_unpin_extent_range(fs_info, head->bytenr,
-                               head->bytenr + head->num_bytes - 1);
-               }
-               btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
-               btrfs_put_delayed_ref_head(head);
-               cond_resched();
-               spin_lock(&delayed_refs->lock);
-       }
-       btrfs_qgroup_destroy_extent_records(trans);
-
-       spin_unlock(&delayed_refs->lock);
-}
-
 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 {
        struct btrfs_inode *btrfs_inode;