return ret;
 }
 
-static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_to_scan)
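+/*
+ * Scan context for one run of the extent map shrinker: the scan budget and
+ * the number of extent maps scanned so far, plus the root and inode where
+ * the next run should resume scanning.
+ */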
+struct btrfs_em_shrink_ctx {
+       long nr_to_scan;
+       long scanned;
+       u64 last_ino;
+       u64 last_root;
+};
+
+static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
 {
        const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info);
        struct extent_map_tree *tree = &inode->extent_tree;
 
                em = rb_entry(node, struct extent_map, rb_node);
                node = rb_next(node);
-               (*scanned)++;
+               ctx->scanned++;
 
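+               /* Never drop pinned extent maps, they are still in use. */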
                if (em->flags & EXTENT_FLAG_PINNED)
                        goto next;
                free_extent_map(em);
                nr_dropped++;
 next:
-               if (*scanned >= nr_to_scan)
+               if (ctx->scanned >= ctx->nr_to_scan)
                        break;
 
        return nr_dropped;
 }
 
-static long btrfs_scan_root(struct btrfs_root *root, long *scanned, long nr_to_scan)
+static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_inode *inode;
        long nr_dropped = 0;
-       u64 min_ino = fs_info->extent_map_shrinker_last_ino + 1;
+       u64 min_ino = ctx->last_ino + 1;
 
        inode = btrfs_find_first_inode(root, min_ino);
        while (inode) {
-               nr_dropped += btrfs_scan_inode(inode, scanned, nr_to_scan);
+               nr_dropped += btrfs_scan_inode(inode, ctx);
 
                min_ino = btrfs_ino(inode) + 1;
-               fs_info->extent_map_shrinker_last_ino = btrfs_ino(inode);
+               ctx->last_ino = btrfs_ino(inode);
                btrfs_add_delayed_iput(inode);
 
-               if (*scanned >= nr_to_scan)
+               if (ctx->scanned >= ctx->nr_to_scan)
                        break;
 
                /*
                 * There are still inodes in this root or we happened to process
                 * the last one and reached the scan limit. In either case set
                 * the current root to this one, so we'll resume from the next
                 * inode if there is one or we will find out this was the last
                 * one and move to the next root.
                 */
-               fs_info->extent_map_shrinker_last_root = btrfs_root_id(root);
+               ctx->last_root = btrfs_root_id(root);
        } else {
                /*
-                * No more inodes in this root, set extent_map_shrinker_last_ino to 0 so
-                * that when processing the next root we start from its first inode.
+                * No more inodes in this root, set ctx->last_ino to 0 so that
+                * when processing the next root we start from its first inode.
                 */
-               fs_info->extent_map_shrinker_last_ino = 0;
-               fs_info->extent_map_shrinker_last_root = btrfs_root_id(root) + 1;
+               ctx->last_ino = 0;
+               ctx->last_root = btrfs_root_id(root) + 1;
        }
 
        return nr_dropped;
 }
 
 long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
 {
-       const u64 start_root_id = fs_info->extent_map_shrinker_last_root;
-       u64 next_root_id = start_root_id;
+       struct btrfs_em_shrink_ctx ctx;
+       u64 start_root_id;
+       u64 next_root_id;
        bool cycled = false;
        long nr_dropped = 0;
-       long scanned = 0;
+
+       ctx.scanned = 0;
+       ctx.nr_to_scan = nr_to_scan;
+
+       /*
+        * In case we have multiple tasks running this shrinker, make the next
+        * one start from the next inode in case it starts before we finish.
+        */
+       spin_lock(&fs_info->extent_map_shrinker_lock);
+       ctx.last_ino = fs_info->extent_map_shrinker_last_ino;
+       fs_info->extent_map_shrinker_last_ino++;
+       ctx.last_root = fs_info->extent_map_shrinker_last_root;
+       spin_unlock(&fs_info->extent_map_shrinker_lock);
+
+       start_root_id = ctx.last_root;
+       next_root_id = ctx.last_root;
 
        if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
                s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
 
-               trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan, nr);
+               trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan,
+                                                          nr, ctx.last_root,
+                                                          ctx.last_ino);
        }
 
        /*
         * We may be called from memory allocation paths, so we don't want to
         * take too much time and slow down tasks; stop if we need to reschedule.
         */
-       while (scanned < nr_to_scan && !need_resched()) {
+       while (ctx.scanned < ctx.nr_to_scan && !need_resched()) {
                struct btrfs_root *root;
                unsigned long count;
 
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        if (start_root_id > 0 && !cycled) {
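+                               /*
+                                * We started from a root other than the first
+                                * one, so cycle back once to scan the roots we
+                                * skipped before giving up.
+                                */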
                                next_root_id = 0;
-                               fs_info->extent_map_shrinker_last_root = 0;
-                               fs_info->extent_map_shrinker_last_ino = 0;
+                               ctx.last_root = 0;
+                               ctx.last_ino = 0;
                                cycled = true;
                                continue;
                        }
                        continue;
 
                if (is_fstree(btrfs_root_id(root)))
-                       nr_dropped += btrfs_scan_root(root, &scanned, nr_to_scan);
+                       nr_dropped += btrfs_scan_root(root, &ctx);
 
                btrfs_put_root(root);
        }
 
+       /*
+        * In case of multiple tasks running this extent map shrinking code this
+        * isn't perfect but it's simple and silences things like KCSAN. It's
+        * not possible to know which task made more progress because we can
+        * cycle back to the first root and first inode if it's not the first
+        * time the shrinker ran, see the above logic. Also a task that started
+        * later may finish earlier than another task while having made less
+        * progress. So keep this simple and update to the progress of the last
+        * task that finished, with the occasional possibility of having two
+        * consecutive runs of the shrinker process the same inodes.
+        */
+       spin_lock(&fs_info->extent_map_shrinker_lock);
+       fs_info->extent_map_shrinker_last_ino = ctx.last_ino;
+       fs_info->extent_map_shrinker_last_root = ctx.last_root;
+       spin_unlock(&fs_info->extent_map_shrinker_lock);
+
        if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
                s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
 
-               trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
+               trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped,
+                                                         nr, ctx.last_root,
+                                                         ctx.last_ino);
        }
 
        return nr_dropped;
 }
 
 
 TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
 
-       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr),
+       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr,
+                u64 last_root_id, u64 last_ino),
 
-       TP_ARGS(fs_info, nr_to_scan, nr),
+       TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino),
 
        TP_STRUCT__entry_btrfs(
                __field(        long,   nr_to_scan      )
        TP_fast_assign_btrfs(fs_info,
                __entry->nr_to_scan     = nr_to_scan;
                __entry->nr             = nr;
-               __entry->last_root_id   = fs_info->extent_map_shrinker_last_root;
-               __entry->last_ino       = fs_info->extent_map_shrinker_last_ino;
+               __entry->last_root_id   = last_root_id;
+               __entry->last_ino       = last_ino;
        ),
 
        TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
 
 TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
 
-       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr),
+       TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr,
+                u64 last_root_id, u64 last_ino),
 
-       TP_ARGS(fs_info, nr_dropped, nr),
+       TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino),
 
        TP_STRUCT__entry_btrfs(
                __field(        long,   nr_dropped      )
        TP_fast_assign_btrfs(fs_info,
                __entry->nr_dropped     = nr_dropped;
                __entry->nr             = nr;
-               __entry->last_root_id   = fs_info->extent_map_shrinker_last_root;
-               __entry->last_ino       = fs_info->extent_map_shrinker_last_ino;
+               __entry->last_root_id   = last_root_id;
+               __entry->last_ino       = last_ino;
        ),
 
        TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",