        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (!dummy)
-               extent_io_tree_init(fs_info, &root->dirty_log_pages, NULL);
+               extent_io_tree_init(fs_info, &root->dirty_log_pages,
+                                   IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
 
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        inode->i_mapping->a_ops = &btree_aops;
 
        RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
-       extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, inode);
+       extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
+                           IO_TREE_INODE_IO, inode);
        BTRFS_I(inode)->io_tree.track_uptodate = false;
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
        fs_info->block_group_cache_tree = RB_ROOT;
        fs_info->first_logical_byte = (u64)-1;
 
-       extent_io_tree_init(fs_info, &fs_info->freed_extents[0], NULL);
-       extent_io_tree_init(fs_info, &fs_info->freed_extents[1], NULL);
+       extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
+                           IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
+       extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
+                           IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
 
 
 }
 
 void extent_io_tree_init(struct btrfs_fs_info *fs_info,
-                        struct extent_io_tree *tree, void *private_data)
+                        struct extent_io_tree *tree, unsigned int owner,
+                        void *private_data)
 {
        tree->fs_info = fs_info;
        tree->state = RB_ROOT;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        tree->private_data = private_data;
+       tree->owner = owner;
 }
 
 static struct extent_state *alloc_extent_state(gfp_t mask)
 
                                    int mirror);
 };
 
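+/*
+ * Identifies who owns an extent_io_tree (stored in extent_io_tree::owner),
+ * so individual trees can be told apart, e.g. in tracepoints.
+ */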
+enum {
+       IO_TREE_FS_INFO_FREED_EXTENTS0,
+       IO_TREE_FS_INFO_FREED_EXTENTS1,
+       IO_TREE_INODE_IO,
+       IO_TREE_INODE_IO_FAILURE,
+       IO_TREE_RELOC_BLOCKS,
+       IO_TREE_TRANS_DIRTY_PAGES,
+       IO_TREE_ROOT_DIRTY_LOG_PAGES,
+       IO_TREE_SELFTEST,
+};
+
 struct extent_io_tree {
        struct rb_root state;
        struct btrfs_fs_info *fs_info;
        void *private_data;
        u64 dirty_bytes;
        bool track_uptodate;
+
+       /* Who owns this io tree, should be one of IO_TREE_* */
+       u8 owner;
+
        spinlock_t lock;
        const struct extent_io_ops *ops;
 };
                                          int create);
 
 void extent_io_tree_init(struct btrfs_fs_info *fs_info,
-                        struct extent_io_tree *tree, void *private_data);
+                        struct extent_io_tree *tree, unsigned int owner,
+                        void *private_data);
 int try_release_extent_mapping(struct page *page, gfp_t mask);
 int try_release_extent_buffer(struct page *page);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 
 
        inode = &ei->vfs_inode;
        extent_map_tree_init(&ei->extent_tree);
-       extent_io_tree_init(fs_info, &ei->io_tree, inode);
-       extent_io_tree_init(fs_info, &ei->io_failure_tree, inode);
+       extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
+       extent_io_tree_init(fs_info, &ei->io_failure_tree,
+                           IO_TREE_INODE_IO_FAILURE, inode);
        ei->io_tree.track_uptodate = true;
        ei->io_failure_tree.track_uptodate = true;
        atomic_set(&ei->sync_writers, 0);
 
        INIT_LIST_HEAD(&rc->dirty_subvol_roots);
        backref_cache_init(&rc->backref_cache);
        mapping_tree_init(&rc->reloc_root_tree);
-       extent_io_tree_init(fs_info, &rc->processed_blocks, NULL);
+       extent_io_tree_init(fs_info, &rc->processed_blocks,
+                           IO_TREE_RELOC_BLOCKS, NULL);
        return rc;
 }
 
 
        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
        INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
-       extent_io_tree_init(fs_info, &fs_info->freed_extents[0], NULL);
-       extent_io_tree_init(fs_info, &fs_info->freed_extents[1], NULL);
+       extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
+                           IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
+       extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
+                           IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
 
 
                return -ENOMEM;
        }
 
-       extent_io_tree_init(NULL, &tmp, NULL);
+       /*
+        * Pass NULL for the fs_info as it is not available here; tracepoints
+        * are not used at this point anyway.
+        */
+       extent_io_tree_init(NULL, &tmp, IO_TREE_SELFTEST, NULL);
 
        /*
         * First go through and create and mark all of our pages dirty, we pin
 
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
-                            fs_info->btree_inode);
+                           IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;