btrfs: remove unnecessary delayed refs locking at btrfs_qgroup_trace_extent()
author Filipe Manana <fdmanana@suse.com>
Tue, 24 Sep 2024 16:37:28 +0000 (17:37 +0100)
committer David Sterba <dsterba@suse.com>
Mon, 11 Nov 2024 13:34:13 +0000 (14:34 +0100)
There's no need to hold the delayed refs spinlock when calling
btrfs_qgroup_trace_extent_nolock() from btrfs_qgroup_trace_extent(), since
it doesn't change anything in delayed refs and it only changes the xarray
used to track qgroup extent records, which is protected by the xarray's
lock.

Holding the lock only adds unnecessary lock contention with other tasks
that actually need to take the lock to add/remove/change delayed
references. So remove the locking.
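
As a rough illustration of the locking pattern this relies on (a minimal
userspace sketch, not the btrfs code; record_store and record_store_insert
are hypothetical names standing in for the dirty_extents xarray and
btrfs_qgroup_trace_extent_nolock()): the callee serializes itself with its
own internal lock, so wrapping the call in an unrelated outer lock adds
contention without adding protection.

    /* Build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>

    struct record_store {
            pthread_mutex_t lock;   /* internal lock, like the xarray's xa_lock */
            int nr_records;
    };

    /* Safe to call without any external locking: it takes its own lock. */
    static int record_store_insert(struct record_store *rs)
    {
            pthread_mutex_lock(&rs->lock);
            rs->nr_records++;
            pthread_mutex_unlock(&rs->lock);
            return 0;
    }

    int main(void)
    {
            struct record_store rs = { .lock = PTHREAD_MUTEX_INITIALIZER };

            /*
             * Before this change the caller also held an outer lock (the
             * delayed refs spinlock) around the call; since the insert
             * serializes itself, that outer lock only added contention.
             */
            record_store_insert(&rs);
            printf("records: %d\n", rs.nr_records);
            return 0;
    }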

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/qgroup.c

index 3f6bff0b6d82df8aee1aa9a4433df90ca78fddb1..316a12f8b51d16b9f495ced0bad546d357ce1c33 100644
@@ -2021,7 +2021,6 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
        }
 #endif
 
-       lockdep_assert_held(&delayed_refs->lock);
        trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
 
        xa_lock(&delayed_refs->dirty_extents);
@@ -2160,9 +2159,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
        record->num_bytes = num_bytes;
        record->old_roots = NULL;
 
-       spin_lock(&delayed_refs->lock);
        ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
-       spin_unlock(&delayed_refs->lock);
        if (ret) {
                /* Clean up if insertion fails or item exists. */
                xa_release(&delayed_refs->dirty_extents, index);