 path->slots[0]);
        return 0;
 }
+
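+/*
+ * Initialize a backref cache: empty rb_root, all of the pending, changed,
+ * detached, leaves, pending_edge and useless_node lists, and record the
+ * owning fs_info and whether the cache is used by relocation (@is_reloc).
+ */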
+void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
+                             struct btrfs_backref_cache *cache, int is_reloc)
+{
+       int i;
+
+       cache->rb_root = RB_ROOT;
+       for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+               INIT_LIST_HEAD(&cache->pending[i]);
+       INIT_LIST_HEAD(&cache->changed);
+       INIT_LIST_HEAD(&cache->detached);
+       INIT_LIST_HEAD(&cache->leaves);
+       INIT_LIST_HEAD(&cache->pending_edge);
+       INIT_LIST_HEAD(&cache->useless_node);
+       cache->fs_info = fs_info;
+       cache->is_reloc = is_reloc;
+}
 
        spin_lock_init(&tree->lock);
 }
 
-static void backref_cache_init(struct btrfs_fs_info *fs_info,
-                              struct btrfs_backref_cache *cache, int is_reloc)
-{
-       int i;
-       cache->rb_root = RB_ROOT;
-       for (i = 0; i < BTRFS_MAX_LEVEL; i++)
-               INIT_LIST_HEAD(&cache->pending[i]);
-       INIT_LIST_HEAD(&cache->changed);
-       INIT_LIST_HEAD(&cache->detached);
-       INIT_LIST_HEAD(&cache->leaves);
-       INIT_LIST_HEAD(&cache->pending_edge);
-       INIT_LIST_HEAD(&cache->useless_node);
-       cache->fs_info = fs_info;
-       cache->is_reloc = is_reloc;
-}
-
 static void backref_cache_cleanup(struct btrfs_backref_cache *cache)
 {
        struct btrfs_backref_node *node;
 
        INIT_LIST_HEAD(&rc->reloc_roots);
        INIT_LIST_HEAD(&rc->dirty_subvol_roots);
-       backref_cache_init(fs_info, &rc->backref_cache, 1);
+       btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
        mapping_tree_init(&rc->reloc_root_tree);
        extent_io_tree_init(fs_info, &rc->processed_blocks,
                            IO_TREE_RELOC_BLOCKS, NULL);