From c5e268022465c6a6fa8dac88e6b6fd17e2386b35 Mon Sep 17 00:00:00 2001
From: Filipe Manana
Date: Tue, 24 Sep 2024 17:37:28 +0100
Subject: [PATCH] btrfs: remove unnecessary delayed refs locking at btrfs_qgroup_trace_extent()

There's no need to hold the delayed refs spinlock when calling
btrfs_qgroup_trace_extent_nolock() from btrfs_qgroup_trace_extent(), since
it doesn't change anything in delayed refs and it only changes the xarray
used to track qgroup extent records, which is protected by the xarray's
lock.

Holding the lock is only adding unnecessary lock contention with other
tasks that actually need to take the lock to add/remove/change delayed
references. So remove the locking.

Reviewed-by: Qu Wenruo
Signed-off-by: Filipe Manana
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/qgroup.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 3f6bff0b6d82..316a12f8b51d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2021,7 +2021,6 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
 	}
 #endif
 
-	lockdep_assert_held(&delayed_refs->lock);
 	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
 
 	xa_lock(&delayed_refs->dirty_extents);
@@ -2160,9 +2159,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 	record->num_bytes = num_bytes;
 	record->old_roots = NULL;
 
-	spin_lock(&delayed_refs->lock);
 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
-	spin_unlock(&delayed_refs->lock);
 	if (ret) {
 		/* Clean up if insertion fails or item exists. */
 		xa_release(&delayed_refs->dirty_extents, index);
-- 
2.50.1
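
For readers outside the btrfs code base, the following is a minimal userspace sketch of the locking pattern the patch moves to: the structure that holds the extent records carries its own internal lock (mirroring xa_lock(&delayed_refs->dirty_extents) in btrfs_qgroup_trace_extent_nolock()), so the caller no longer needs to hold the broader delayed-refs spinlock around the insertion. All names here (record_store, store_insert, trace_extent) are illustrative assumptions and do not exist in the kernel; this is not the actual btrfs code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	unsigned long long bytenr;
	struct record *next;
};

/* Stand-in for the dirty_extents xarray: a container guarded by its own lock. */
struct record_store {
	pthread_mutex_t lock;
	struct record *head;
};

/*
 * Analogue of btrfs_qgroup_trace_extent_nolock(): only the store's own lock
 * is taken, the way the real function takes xa_lock() on the xarray.
 */
static int store_insert(struct record_store *store, struct record *rec)
{
	pthread_mutex_lock(&store->lock);
	rec->next = store->head;
	store->head = rec;
	pthread_mutex_unlock(&store->lock);
	return 0;
}

/*
 * Analogue of btrfs_qgroup_trace_extent() after the patch: no outer
 * "delayed refs" lock is taken around the insertion, since the store's
 * internal lock already serializes concurrent inserters.
 */
static int trace_extent(struct record_store *store, unsigned long long bytenr)
{
	struct record *rec = malloc(sizeof(*rec));

	if (!rec)
		return -1;
	rec->bytenr = bytenr;
	return store_insert(store, rec);
}

int main(void)
{
	struct record_store store = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.head = NULL,
	};

	if (trace_extent(&store, 4096) == 0)
		printf("record inserted under the store's own lock only\n");
	return 0;
}

The design point is the same as in the commit message: when the data structure being modified has its own lock, taking an additional outer lock that protects unrelated state only adds contention for the tasks that genuinely need that outer lock.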