                               struct btrfs_free_space *info, u64 offset,
                              u64 bytes, bool update_stats);
 
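+/*
+ * Remove every entry from the free space cache: extent entries are
+ * unlinked and freed, bitmap entries are released via free_bitmap().
+ * Must be called with ctl->tree_lock held; cond_resched_lock() may
+ * briefly drop and reacquire the lock between iterations.
+ */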
+static void __btrfs_remove_free_space_cache_locked(
+                               struct btrfs_free_space_ctl *ctl)
+{
+       struct btrfs_free_space *info;
+       struct rb_node *node;
+
+       while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
+               info = rb_entry(node, struct btrfs_free_space, offset_index);
+               if (!info->bitmap) {
+                       unlink_free_space(ctl, info, true);
+                       kmem_cache_free(btrfs_free_space_cachep, info);
+               } else {
+                       free_bitmap(ctl, info);
+               }
+
+               cond_resched_lock(&ctl->tree_lock);
+       }
+}
+
 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
                                               u64 offset)
        return ret;
 free_cache:
        io_ctl_drop_pages(&io_ctl);
-       __btrfs_remove_free_space_cache(ctl);
+
+       /*
+        * We need to call the _locked variant so we don't try to update the
+        * discard counters.
+        */
+       spin_lock(&ctl->tree_lock);
+       __btrfs_remove_free_space_cache_locked(ctl);
+       spin_unlock(&ctl->tree_lock);
        goto out;
 }
 
                if (ret == 0)
                        ret = 1;
        } else {
+               /*
+                * We need to call the _locked variant so we don't try to update
+                * the discard counters.
+                */
+               spin_lock(&tmp_ctl.tree_lock);
-               __btrfs_remove_free_space_cache(&tmp_ctl);
+               __btrfs_remove_free_space_cache_locked(&tmp_ctl);
+               spin_unlock(&tmp_ctl.tree_lock);
                btrfs_warn(fs_info,
                           "block group %llu has wrong amount of free space",
                           block_group->start);
        btrfs_put_block_group(block_group);
 }
 
-static void __btrfs_remove_free_space_cache_locked(
-                               struct btrfs_free_space_ctl *ctl)
-{
-       struct btrfs_free_space *info;
-       struct rb_node *node;
-
-       while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
-               info = rb_entry(node, struct btrfs_free_space, offset_index);
-               if (!info->bitmap) {
-                       unlink_free_space(ctl, info, true);
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               } else {
-                       free_bitmap(ctl, info);
-               }
-
-               cond_resched_lock(&ctl->tree_lock);
-       }
-}
-
 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
 {
        spin_lock(&ctl->tree_lock);