btrfs: remove num_entries atomic counter from delayed ref root
author Filipe Manana <fdmanana@suse.com>
Wed, 23 Oct 2024 13:14:11 +0000 (14:14 +0100)
committer David Sterba <dsterba@suse.com>
Mon, 11 Nov 2024 13:34:20 +0000 (14:34 +0100)
The atomic counter 'num_entries' is not used anymore: we increment and
decrement it, but we never read it to drive any logic.
Its last use was removed with commit 61a56a992fcf ("btrfs: delayed refs
pre-flushing should only run the heads we have"). So remove it.
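
For illustration only, a minimal userspace C sketch (not kernel code;
'num_entries', 'add_entry' and 'drop_entry' are hypothetical stand-ins)
of the pattern being removed: an atomic counter that is only ever
written, never read, so deleting it cannot change observable behavior.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int num_entries;  /* write-only: dead state */

    static void add_entry(void)
    {
            atomic_fetch_add(&num_entries, 1);  /* mirrors atomic_inc() */
    }

    static void drop_entry(void)
    {
            atomic_fetch_sub(&num_entries, 1);  /* mirrors atomic_dec() */
    }

    int main(void)
    {
            add_entry();
            drop_entry();
            /* num_entries is never read to make a decision, so the
             * counter and every update to it can be removed safely. */
            printf("done\n");
            return 0;
    }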

Reviewed-by: Boris Burkov <boris@bur.io>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/extent-tree.c
fs/btrfs/transaction.c

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index dc3a29f3c3577a739106ccb8fb64d6920e3f59be..f7c7d1249f04f8fcf741cae2c62d3558e261f017 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -463,7 +463,6 @@ static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        btrfs_put_delayed_ref(ref);
-       atomic_dec(&delayed_refs->num_entries);
        btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
 }
 
@@ -604,7 +603,6 @@ void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 
        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
-       atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (!head->processing)
                delayed_refs->num_heads_ready--;
@@ -630,7 +628,6 @@ static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
        if (!exist) {
                if (ref->action == BTRFS_ADD_DELAYED_REF)
                        list_add_tail(&ref->add_list, &href->ref_add_list);
-               atomic_inc(&root->num_entries);
                spin_unlock(&href->lock);
                trans->delayed_ref_updates++;
                return false;
@@ -901,7 +898,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
                }
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
-               atomic_inc(&delayed_refs->num_entries);
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index cc78395f2fcd0c4a440ff3e796c3f2244227518e..a97c9df19ea0715fc6122348fbd0b6a57e48c2d3 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -216,11 +216,6 @@ struct btrfs_delayed_ref_root {
        /* this spin lock protects the rbtree and the entries inside */
        spinlock_t lock;
 
-       /* how many delayed ref updates we've queued, used by the
-        * throttling code
-        */
-       atomic_t num_entries;
-
        /* total number of head nodes in tree */
        unsigned long num_heads;
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 594d18ed908c47b1e25a27dd2c869cc9f90641ac..adff2b6fb629409e98cd3e9252479d444d4f6174 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2029,7 +2029,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
                default:
                        WARN_ON(1);
                }
-               atomic_dec(&delayed_refs->num_entries);
 
                /*
                 * Record the must_insert_reserved flag before we drop the
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e580c566f03320fd07532c593482b6659296d07a..9ccf68ab53f9b4b62891415e07541c7e583fdb06 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -351,7 +351,6 @@ loop:
 
        cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
        xa_init(&cur_trans->delayed_refs.dirty_extents);
-       atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 
        /*
         * although the tree mod log is per file system and not per transaction,