www.infradead.org Git - users/dwmw2/linux.git/commitdiff
btrfs: simplify tracking progress for the extent map shrinker
author: Filipe Manana <fdmanana@suse.com>
Wed, 4 Sep 2024 16:03:43 +0000 (17:03 +0100)
committer: David Sterba <dsterba@suse.com>
Mon, 11 Nov 2024 13:34:17 +0000 (14:34 +0100)
Now that the extent map shrinker can only be run by a single task (as a
work queue item) there is no need to keep the progress of the shrinker
protected by a spinlock and passing the progress to trace events as
parameters. So remove the lock and simplify the arguments for the trace
events.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/disk-io.c
fs/btrfs/extent_map.c
fs/btrfs/fs.h
include/trace/events/btrfs.h

index 8ad625a8d9f943844b257842b970dec5dfa65282..0cbc31908264c2a740569aabb9799b595f2691a9 100644 (file)
@@ -2852,8 +2852,6 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
        if (ret)
                return ret;
 
-       spin_lock_init(&fs_info->extent_map_shrinker_lock);
-
        ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
        if (ret)
                return ret;
index 4b9ab3f9c731a7e7e3db3e1ee5018e5bc77228b0..f6e338dd28ecc9b3a068a1dc7a35cd3601c484fc 100644 (file)
@@ -1118,8 +1118,6 @@ out_free_pre:
 struct btrfs_em_shrink_ctx {
        long nr_to_scan;
        long scanned;
-       u64 last_ino;
-       u64 last_root;
 };
 
 static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
@@ -1211,16 +1209,17 @@ next:
 
 static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
 {
+       struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_inode *inode;
        long nr_dropped = 0;
-       u64 min_ino = ctx->last_ino + 1;
+       u64 min_ino = fs_info->extent_map_shrinker_last_ino + 1;
 
        inode = btrfs_find_first_inode(root, min_ino);
        while (inode) {
                nr_dropped += btrfs_scan_inode(inode, ctx);
 
                min_ino = btrfs_ino(inode) + 1;
-               ctx->last_ino = btrfs_ino(inode);
+               fs_info->extent_map_shrinker_last_ino = btrfs_ino(inode);
                btrfs_add_delayed_iput(inode);
 
                if (ctx->scanned >= ctx->nr_to_scan ||
@@ -1240,14 +1239,14 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
                 * inode if there is one or we will find out this was the last
                 * one and move to the next root.
                 */
-               ctx->last_root = btrfs_root_id(root);
+               fs_info->extent_map_shrinker_last_root = btrfs_root_id(root);
        } else {
                /*
                 * No more inodes in this root, set extent_map_shrinker_last_ino to 0 so
                 * that when processing the next root we start from its first inode.
                 */
-               ctx->last_ino = 0;
-               ctx->last_root = btrfs_root_id(root) + 1;
+               fs_info->extent_map_shrinker_last_ino = 0;
+               fs_info->extent_map_shrinker_last_root = btrfs_root_id(root) + 1;
        }
 
        return nr_dropped;
@@ -1267,25 +1266,13 @@ static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
        ctx.scanned = 0;
        ctx.nr_to_scan = atomic64_read(&fs_info->extent_map_shrinker_nr_to_scan);
 
-       /*
-        * In case we have multiple tasks running this shrinker, make the next
-        * one start from the next inode in case it starts before we finish.
-        */
-       spin_lock(&fs_info->extent_map_shrinker_lock);
-       ctx.last_ino = fs_info->extent_map_shrinker_last_ino;
-       fs_info->extent_map_shrinker_last_ino++;
-       ctx.last_root = fs_info->extent_map_shrinker_last_root;
-       spin_unlock(&fs_info->extent_map_shrinker_lock);
-
-       start_root_id = ctx.last_root;
-       next_root_id = ctx.last_root;
+       start_root_id = fs_info->extent_map_shrinker_last_root;
+       next_root_id = fs_info->extent_map_shrinker_last_root;
 
        if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
                s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
 
-               trace_btrfs_extent_map_shrinker_scan_enter(fs_info, ctx.nr_to_scan,
-                                                          nr, ctx.last_root,
-                                                          ctx.last_ino);
+               trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
        }
 
        while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
@@ -1302,8 +1289,8 @@ static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        if (start_root_id > 0 && !cycled) {
                                next_root_id = 0;
-                               ctx.last_root = 0;
-                               ctx.last_ino = 0;
+                               fs_info->extent_map_shrinker_last_root = 0;
+                               fs_info->extent_map_shrinker_last_ino = 0;
                                cycled = true;
                                continue;
                        }
@@ -1322,28 +1309,10 @@ static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
                btrfs_put_root(root);
        }
 
-       /*
-        * In case of multiple tasks running this extent map shrinking code this
-        * isn't perfect but it's simple and silences things like KCSAN. It's
-        * not possible to know which task made more progress because we can
-        * cycle back to the first root and first inode if it's not the first
-        * time the shrinker ran, see the above logic. Also a task that started
-        * later may finish earlier than another task and made less progress. So
-        * make this simple and update to the progress of the last task that
-        * finished, with the occasional possibility of having two consecutive
-        * runs of the shrinker process the same inodes.
-        */
-       spin_lock(&fs_info->extent_map_shrinker_lock);
-       fs_info->extent_map_shrinker_last_ino = ctx.last_ino;
-       fs_info->extent_map_shrinker_last_root = ctx.last_root;
-       spin_unlock(&fs_info->extent_map_shrinker_lock);
-
        if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
                s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
 
-               trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped,
-                                                         nr, ctx.last_root,
-                                                         ctx.last_ino);
+               trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
        }
 
        atomic64_set(&fs_info->extent_map_shrinker_nr_to_scan, 0);
index 678ce5259332bb81e046237f0e86cf45edaec812..827af2b027df01d72bb8230cc8dd674af1083416 100644 (file)
@@ -635,7 +635,6 @@ struct btrfs_fs_info {
        s32 delalloc_batch;
 
        struct percpu_counter evictable_extent_maps;
-       spinlock_t extent_map_shrinker_lock;
        u64 extent_map_shrinker_last_root;
        u64 extent_map_shrinker_last_ino;
        atomic64_t extent_map_shrinker_nr_to_scan;
index bd515415ea8b817eff34531711f2c2b1e385dc62..879016b4e391cd7fabc8143648767b8d46793257 100644 (file)
@@ -2555,10 +2555,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_count,
 
 TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
 
-       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr,
-                u64 last_root_id, u64 last_ino),
+       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr),
 
-       TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino),
+       TP_ARGS(fs_info, nr),
 
        TP_STRUCT__entry_btrfs(
                __field(        long,   nr_to_scan      )
@@ -2568,10 +2567,11 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
        ),
 
        TP_fast_assign_btrfs(fs_info,
-               __entry->nr_to_scan     = nr_to_scan;
+               __entry->nr_to_scan     = \
+                    atomic64_read(&fs_info->extent_map_shrinker_nr_to_scan);
                __entry->nr             = nr;
-               __entry->last_root_id   = last_root_id;
-               __entry->last_ino       = last_ino;
+               __entry->last_root_id   = fs_info->extent_map_shrinker_last_root;
+               __entry->last_ino       = fs_info->extent_map_shrinker_last_ino;
        ),
 
        TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
@@ -2581,10 +2581,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
 
 TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
 
-       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr,
-                u64 last_root_id, u64 last_ino),
+       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr),
 
-       TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino),
+       TP_ARGS(fs_info, nr_dropped, nr),
 
        TP_STRUCT__entry_btrfs(
                __field(        long,   nr_dropped      )
@@ -2596,8 +2595,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
        TP_fast_assign_btrfs(fs_info,
                __entry->nr_dropped     = nr_dropped;
                __entry->nr             = nr;
-               __entry->last_root_id   = last_root_id;
-               __entry->last_ino       = last_ino;
+               __entry->last_root_id   = fs_info->extent_map_shrinker_last_root;
+               __entry->last_ino       = fs_info->extent_map_shrinker_last_ino;
        ),
 
        TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",