cache->length = key->offset;
        cache->used = btrfs_stack_block_group_used(bgi);
        cache->flags = btrfs_stack_block_group_flags(bgi);
+       cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
 
        set_free_space_tree_thresholds(cache);
 
        spin_lock(&block_group->lock);
        btrfs_set_stack_block_group_used(&bgi, block_group->used);
        btrfs_set_stack_block_group_chunk_objectid(&bgi,
-                               BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+                                                  block_group->global_root_id);
        btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
        key.objectid = block_group->start;
        key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        btrfs_trans_release_chunk_metadata(trans);
 }
 
+/*
+ * Map a block group's start offset to a global root id.
+ *
+ * For extent tree v2 we use the block_group_item->chunk_objectid to point at
+ * our global root id.  For v1 it's always set to
+ * BTRFS_FIRST_CHUNK_TREE_OBJECTID.
+ *
+ * The id is computed by striping the offset into @div sized chunks and
+ * taking the result modulo fs_info->nr_global_roots, so consecutive
+ * stripes rotate through the available global roots.
+ */
+static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset)
+{
+       u64 div = SZ_1G;
+       u64 index;
+
+       if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
+               return BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+
+       /* For filesystems of 10GiB or less, stripe in 128MiB chunks instead. */
+       if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
+               div = SZ_128M;
+
+       offset = div64_u64(offset, div);
+       div64_u64_rem(offset, fs_info->nr_global_roots, &index);
+       return index;
+}
+
 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
                                                 u64 bytes_used, u64 type,
                                                 u64 chunk_offset, u64 size)
        cache->flags = type;
        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
+       cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
+
        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                cache->needs_free_space = 1;
 
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        btrfs_set_stack_block_group_used(&bgi, cache->used);
        btrfs_set_stack_block_group_chunk_objectid(&bgi,
-                       BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+                                                  cache->global_root_id);
        btrfs_set_stack_block_group_flags(&bgi, cache->flags);
        write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
        btrfs_mark_buffer_dirty(leaf);
 
        u64 bytes_super;
        u64 flags;
        u64 cache_generation;
+       u64 global_root_id;
 
        /*
         * If the free space extent count exceeds this number, convert the block
 
        spinlock_t relocation_bg_lock;
        u64 data_reloc_bg;
 
+       u64 nr_global_roots;
+
        spinlock_t zone_active_bgs_lock;
        struct list_head zone_active_bgs;
 
 
        return root;
 }
 
+/*
+ * Look up the global root id that covers @bytenr.
+ *
+ * Without EXTENT_TREE_V2 there is only one set of global roots, so this
+ * always returns 0.  With it enabled, find the block group containing
+ * @bytenr (or the first block group when @bytenr is 0) and return the
+ * global_root_id recorded on it.
+ *
+ * A missing block group is unexpected (hence the ASSERT); fall back to
+ * global root 0 in that case rather than dereferencing NULL.
+ */
+static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+       struct btrfs_block_group *block_group;
+       u64 ret;
+
+       if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
+               return 0;
+
+       if (bytenr)
+               block_group = btrfs_lookup_block_group(fs_info, bytenr);
+       else
+               block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
+       ASSERT(block_group);
+       if (!block_group)
+               return 0;
+       ret = block_group->global_root_id;
+       /* Drop the ref taken by the lookup. */
+       btrfs_put_block_group(block_group);
+
+       return ret;
+}
+
 struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 {
        struct btrfs_key key = {
                .objectid = BTRFS_CSUM_TREE_OBJECTID,
                .type = BTRFS_ROOT_ITEM_KEY,
-               .offset = 0,
+               .offset = btrfs_global_root_id(fs_info, bytenr),
        };
 
        return btrfs_global_root(fs_info, &key);
        struct btrfs_key key = {
                .objectid = BTRFS_EXTENT_TREE_OBJECTID,
                .type = BTRFS_ROOT_ITEM_KEY,
-               .offset = 0,
+               .offset = btrfs_global_root_id(fs_info, bytenr),
        };
 
        return btrfs_global_root(fs_info, &key);
 {
        const int next_backup = info->backup_root_index;
        struct btrfs_root_backup *root_backup;
-       struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
 
        root_backup = info->super_for_commit->super_roots + next_backup;
 
                        btrfs_header_level(info->block_group_root->node));
        } else {
                struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
+               struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
 
                btrfs_set_backup_extent_root(root_backup,
                                             extent_root->node->start);
                                btrfs_header_generation(extent_root->node));
                btrfs_set_backup_extent_root_level(root_backup,
                                        btrfs_header_level(extent_root->node));
+
+               btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
+               btrfs_set_backup_csum_root_gen(root_backup,
+                                              btrfs_header_generation(csum_root->node));
+               btrfs_set_backup_csum_root_level(root_backup,
+                                                btrfs_header_level(csum_root->node));
        }
 
        /*
        btrfs_set_backup_dev_root_level(root_backup,
                                       btrfs_header_level(info->dev_root->node));
 
-       btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
-       btrfs_set_backup_csum_root_gen(root_backup,
-                                      btrfs_header_generation(csum_root->node));
-       btrfs_set_backup_csum_root_level(root_backup,
-                                        btrfs_header_level(csum_root->node));
-
        btrfs_set_backup_total_bytes(root_backup,
                             btrfs_super_total_bytes(info->super_copy));
        btrfs_set_backup_bytes_used(root_backup,
 {
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_root *root;
+       u64 max_global_id = 0;
        int ret;
        struct btrfs_key key = {
                .objectid = objectid,
                        break;
                btrfs_release_path(path);
 
+               /*
+                * Just worry about this for extent tree, it'll be the same for
+                * everybody.
+                */
+               if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
+                       max_global_id = max(max_global_id, key.offset);
+
                found = true;
                root = read_tree_root_path(tree_root, path, &key);
                if (IS_ERR(root)) {
        }
        btrfs_release_path(path);
 
+       if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
+               fs_info->nr_global_roots = max_global_id + 1;
+
        if (!found || ret) {
                if (objectid == BTRFS_CSUM_TREE_OBJECTID)
                        set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
 
                .offset = 0,
        };
 
+       if (btrfs_fs_incompat(block_group->fs_info, EXTENT_TREE_V2))
+               key.offset = block_group->global_root_id;
        return btrfs_global_root(block_group->fs_info, &key);
 }
 
 
                super->cache_generation = 0;
        if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
                super->uuid_tree_generation = root_item->generation;
+
+       if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+               root_item = &fs_info->block_group_root->root_item;
+
+               super->block_group_root = root_item->bytenr;
+               super->block_group_root_generation = root_item->generation;
+               super->block_group_root_level = root_item->level;
+       }
 }
 
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
        list_add_tail(&fs_info->chunk_root->dirty_list,
                      &cur_trans->switch_commits);
 
+       if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+               btrfs_set_root_node(&fs_info->block_group_root->root_item,
+                                   fs_info->block_group_root->node);
+               list_add_tail(&fs_info->block_group_root->dirty_list,
+                             &cur_trans->switch_commits);
+       }
+
        switch_commit_roots(trans);
 
        ASSERT(list_empty(&cur_trans->dirty_bgs));
 
 static int check_block_group_item(struct extent_buffer *leaf,
                                  struct btrfs_key *key, int slot)
 {
+       struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_block_group_item bgi;
        u32 item_size = btrfs_item_size(leaf, slot);
+       u64 chunk_objectid;
        u64 flags;
        u64 type;
 
 
        read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
                           sizeof(bgi));
-       if (unlikely(btrfs_stack_block_group_chunk_objectid(&bgi) !=
-                    BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
+       chunk_objectid = btrfs_stack_block_group_chunk_objectid(&bgi);
+       if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
+               /*
+                * We don't init the nr_global_roots until we load the global
+                * roots, so this could be 0 at mount time.  If it's 0 we'll
+                * just assume we're fine, and later we'll check against our
+                * actual value.
+                */
+               if (unlikely(fs_info->nr_global_roots &&
+                            chunk_objectid >= fs_info->nr_global_roots)) {
+                       block_group_err(leaf, slot,
+       "invalid block group global root id, have %llu, needs to be <= %llu",
+                                       chunk_objectid,
+                                       fs_info->nr_global_roots);
+                       return -EUCLEAN;
+               }
+       } else if (unlikely(chunk_objectid != BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
                block_group_err(leaf, slot,
                "invalid block group chunk objectid, have %llu expect %llu",
                                btrfs_stack_block_group_chunk_objectid(&bgi),