        btrfs_set_root_last_log_commit(root, 0);
        root->anon_dev = 0;
        if (!dummy) {
-               extent_io_tree_init(fs_info, &root->dirty_log_pages,
-                                   IO_TREE_ROOT_DIRTY_LOG_PAGES);
-               extent_io_tree_init(fs_info, &root->log_csum_range,
-                                   IO_TREE_LOG_CSUM_RANGE);
+               btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages,
+                                         IO_TREE_ROOT_DIRTY_LOG_PAGES);
+               btrfs_extent_io_tree_init(fs_info, &root->log_csum_range,
+                                         IO_TREE_LOG_CSUM_RANGE);
        }
 
        spin_lock_init(&root->root_item_lock);
        inode->i_mapping->a_ops = &btree_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 
-       extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
-                           IO_TREE_BTREE_INODE_IO);
+       btrfs_extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
+                                 IO_TREE_BTREE_INODE_IO);
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
        BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
        rwlock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree = RB_ROOT_CACHED;
 
-       extent_io_tree_init(fs_info, &fs_info->excluded_extents,
-                           IO_TREE_FS_EXCLUDED_EXTENTS);
+       btrfs_extent_io_tree_init(fs_info, &fs_info->excluded_extents,
+                                 IO_TREE_FS_EXCLUDED_EXTENTS);
 
        mutex_init(&fs_info->ordered_operations_mutex);
        mutex_init(&fs_info->tree_log_mutex);
 
        return tree->fs_info;
 }
 
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
-                        struct extent_io_tree *tree, unsigned int owner)
+void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
+                              struct extent_io_tree *tree, unsigned int owner)
 {
        tree->state = RB_ROOT;
        spin_lock_init(&tree->lock);
  * aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never
  * set on any extent state when calling this function).
  */
-void extent_io_tree_release(struct extent_io_tree *tree)
+void btrfs_extent_io_tree_release(struct extent_io_tree *tree)
 {
        struct rb_root root;
        struct extent_state *state;
 
 const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree);
 const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree);
 
-void extent_io_tree_init(struct btrfs_fs_info *fs_info,
-                        struct extent_io_tree *tree, unsigned int owner);
-void extent_io_tree_release(struct extent_io_tree *tree);
+void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info,
+                              struct extent_io_tree *tree, unsigned int owner);
+void btrfs_extent_io_tree_release(struct extent_io_tree *tree);
 int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
                           struct extent_state **cached);
 bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 
        if (!inode->file_extent_tree)
                return -ENOMEM;
 
-       extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT);
+       btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
+                                 IO_TREE_INODE_FILE_EXTENT);
        /* Lockdep class is set only for the file extent tree. */
        lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
 
        extent_map_tree_init(&ei->extent_tree);
 
        /* This io tree sets the valid inode. */
-       extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
+       btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
        ei->io_tree.inode = ei;
 
        ei->file_extent_tree = NULL;
 
        btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
        rc->reloc_root_tree.rb_root = RB_ROOT;
        spin_lock_init(&rc->reloc_root_tree.lock);
-       extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
+       btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
        return rc;
 }
 
 
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
-       extent_io_tree_init(fs_info, &dev->alloc_state, 0);
+       btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        list_add(&dev->dev_list, &fs_info->fs_devices->devices);
 
 
 static void btrfs_free_dummy_device(struct btrfs_device *dev)
 {
-       extent_io_tree_release(&dev->alloc_state);
+       btrfs_extent_io_tree_release(&dev->alloc_state);
        kfree(dev);
 }
 
 
         * Passing NULL as we don't have fs_info but tracepoints are not used
         * at this point
         */
-       extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
+       btrfs_extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);
 
        /*
         * First go through and create and mark all of our pages dirty, we pin
 
        test_msg("running find_first_clear_extent_bit test");
 
-       extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);
+       btrfs_extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);
 
        /* Test correct handling of empty tree */
        btrfs_find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
 
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
-               extent_io_tree_release(&root->dirty_log_pages);
+               btrfs_extent_io_tree_release(&root->dirty_log_pages);
                btrfs_qgroup_clean_swapped_blocks(root);
        }
 
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
-       extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
-                       IO_TREE_TRANS_DIRTY_PAGES);
-       extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
-                       IO_TREE_FS_PINNED_EXTENTS);
+       btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
+                                 IO_TREE_TRANS_DIRTY_PAGES);
+       btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
+                                 IO_TREE_FS_PINNED_EXTENTS);
        btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_extents(fs_info, dirty_pages);
 
-       extent_io_tree_release(&trans->transaction->dirty_pages);
+       btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);
 
        if (ret)
                return ret;
 
                }
        }
 
-       extent_io_tree_release(&log->dirty_log_pages);
-       extent_io_tree_release(&log->log_csum_range);
+       btrfs_extent_io_tree_release(&log->dirty_log_pages);
+       btrfs_extent_io_tree_release(&log->log_csum_range);
 
        btrfs_put_root(log);
 }
 
 {
        WARN_ON(!list_empty(&device->post_commit_list));
        rcu_string_free(device->name);
-       extent_io_tree_release(&device->alloc_state);
+       btrfs_extent_io_tree_release(&device->alloc_state);
        btrfs_destroy_dev_zone_info(device);
        kfree(device);
 }
 
        device->fs_info = NULL;
        atomic_set(&device->dev_stats_ccnt, 0);
-       extent_io_tree_release(&device->alloc_state);
+       btrfs_extent_io_tree_release(&device->alloc_state);
 
        /*
         * Reset the flush error record. We might have a transient flush error
 
        atomic_set(&dev->dev_stats_ccnt, 0);
        btrfs_device_data_ordered_init(dev);
-       extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
+       btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE);
 
        if (devid)
                tmp = *devid;