                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
-               if (block_group->key.objectid < cache->key.objectid) {
+               if (block_group->start < cache->start) {
                        p = &(*p)->rb_left;
-               } else if (block_group->key.objectid > cache->key.objectid) {
+               } else if (block_group->start > cache->start) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
 
-       if (info->first_logical_byte > block_group->key.objectid)
-               info->first_logical_byte = block_group->key.objectid;
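+       /* first_logical_byte caches the lowest block group start address */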
+       if (info->first_logical_byte > block_group->start)
+               info->first_logical_byte = block_group->start;
 
        spin_unlock(&info->block_group_cache_lock);
 
        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
-               end = cache->key.objectid + cache->key.offset - 1;
-               start = cache->key.objectid;
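+               /* A block group covers the inclusive range [start, end] */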
+               end = cache->start + cache->length - 1;
+               start = cache->start;
 
                if (bytenr < start) {
-                       if (!contains && (!ret || start < ret->key.objectid))
+                       if (!contains && (!ret || start < ret->start))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
        }
        if (ret) {
                btrfs_get_block_group(ret);
-               if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
-                       info->first_logical_byte = ret->key.objectid;
+               if (bytenr == 0 && info->first_logical_byte > ret->start)
+                       info->first_logical_byte = ret->start;
        }
        spin_unlock(&info->block_group_cache_lock);
 
 
        /* If our block group was removed, we need a full search. */
        if (RB_EMPTY_NODE(&cache->cache_node)) {
-               const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+               const u64 next_bytenr = cache->start + cache->length;
 
                spin_unlock(&fs_info->block_group_cache_lock);
                btrfs_put_block_group(cache);
 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
 {
        struct btrfs_fs_info *fs_info = block_group->fs_info;
-       u64 start = block_group->key.objectid;
-       u64 len = block_group->key.offset;
+       u64 start = block_group->start;
+       u64 len = block_group->length;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                fs_info->nodesize : fs_info->sectorsize;
        u64 step = chunk << 1;
        if (!path)
                return -ENOMEM;
 
-       last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+       last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
 
 #ifdef CONFIG_BTRFS_DEBUG
        /*
                        goto next;
                }
 
-               if (key.objectid < block_group->key.objectid) {
+               if (key.objectid < block_group->start) {
                        path->slots[0]++;
                        continue;
                }
 
-               if (key.objectid >= block_group->key.objectid +
-                   block_group->key.offset)
+               if (key.objectid >= block_group->start + block_group->length)
                        break;
 
                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
        ret = 0;
 
        total_found += add_new_free_space(block_group, last,
-                                         block_group->key.objectid +
-                                         block_group->key.offset);
+                               block_group->start + block_group->length);
        caching_ctl->progress = (u64)-1;
 
 out:
 
                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
-               bytes_used = block_group->key.offset - block_group->used;
+               bytes_used = block_group->length - block_group->used;
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
-       caching_ctl->progress = cache->key.objectid;
+       caching_ctl->progress = cache->start;
        refcount_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
 
 
                        spin_lock(&cache->space_info->lock);
                        spin_lock(&cache->lock);
-                       bytes_used = cache->key.offset - cache->used;
+                       bytes_used = cache->length - cache->used;
                        cache->space_info->bytes_used += bytes_used >> 1;
                        spin_unlock(&cache->lock);
                        spin_unlock(&cache->space_info->lock);
         * remove it.
         */
        btrfs_free_excluded_extents(block_group);
-       btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
-                                 block_group->key.offset);
+       btrfs_free_ref_tree_range(fs_info, block_group->start,
+                                 block_group->length);
 
-       memcpy(&key, &block_group->key, sizeof(key));
        index = btrfs_bg_flags_to_raid_index(block_group->flags);
        factor = btrfs_bg_type_to_factor(block_group->flags);
 
        }
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
        key.type = 0;
+       key.offset = block_group->start;
 
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
                 &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);
 
-       if (fs_info->first_logical_byte == block_group->key.objectid)
+       if (fs_info->first_logical_byte == block_group->start)
                fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&fs_info->block_group_cache_lock);
 
 
        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                WARN_ON(block_group->space_info->total_bytes
-                       < block_group->key.offset);
+                       < block_group->length);
                WARN_ON(block_group->space_info->bytes_readonly
-                       < block_group->key.offset);
+                       < block_group->length);
                WARN_ON(block_group->space_info->disk_total
-                       < block_group->key.offset * factor);
+                       < block_group->length * factor);
        }
-       block_group->space_info->total_bytes -= block_group->key.offset;
-       block_group->space_info->bytes_readonly -= block_group->key.offset;
-       block_group->space_info->disk_total -= block_group->key.offset * factor;
+       block_group->space_info->total_bytes -= block_group->length;
+       block_group->space_info->bytes_readonly -= block_group->length;
+       block_group->space_info->disk_total -= block_group->length * factor;
 
        spin_unlock(&block_group->space_info->lock);
 
-       memcpy(&key, &block_group->key, sizeof(key));
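+       /* Block group items are keyed as (start, BLOCK_GROUP_ITEM_KEY, length) */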
+       key.objectid = block_group->start;
+       key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+       key.offset = block_group->length;
 
        mutex_lock(&fs_info->chunk_mutex);
        spin_lock(&block_group->lock);
                goto out;
        }
 
-       num_bytes = cache->key.offset - cache->reserved - cache->pinned -
+       num_bytes = cache->length - cache->reserved - cache->pinned -
                    cache->bytes_super - cache->used;
        sinfo_used = btrfs_space_info_used(sinfo, true);
 
        spin_unlock(&sinfo->lock);
        if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
                btrfs_info(cache->fs_info,
-                       "unable to make block group %llu ro",
-                       cache->key.objectid);
+                       "unable to make block group %llu ro", cache->start);
                btrfs_info(cache->fs_info,
                        "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
                        sinfo_used, num_bytes, min_allocable_bytes);
                 * properly if we fail to join the transaction.
                 */
                trans = btrfs_start_trans_remove_block_group(fs_info,
-                                                    block_group->key.objectid);
+                                                    block_group->start);
                if (IS_ERR(trans)) {
                        btrfs_dec_block_group_ro(block_group);
                        ret = PTR_ERR(trans);
                 * We could have pending pinned extents for this block group,
                 * just delete them, we don't care about them anymore.
                 */
-               start = block_group->key.objectid;
-               end = start + block_group->key.offset - 1;
+               start = block_group->start;
+               end = start + block_group->length - 1;
                /*
                 * Hold the unused_bg_unpin_mutex lock to avoid racing with
                 * btrfs_finish_extent_commit(). If we are at transaction N,
                 * Btrfs_remove_chunk will abort the transaction if things go
                 * horribly wrong.
                 */
-               ret = btrfs_remove_chunk(trans, block_group->key.objectid);
+               ret = btrfs_remove_chunk(trans, block_group->start);
 
                if (ret) {
                        if (trimming)
        int stripe_len;
        int i, nr, ret;
 
-       if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
-               stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
+       if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
+               stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
                cache->bytes_super += stripe_len;
-               ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
+               ret = btrfs_add_excluded_extent(fs_info, cache->start,
                                                stripe_len);
                if (ret)
                        return ret;
 
        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
-               ret = btrfs_rmap_block(fs_info, cache->key.objectid,
+               ret = btrfs_rmap_block(fs_info, cache->start,
                                       bytenr, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;
                while (nr--) {
                        u64 start, len;
 
-                       if (logical[nr] > cache->key.objectid +
-                           cache->key.offset)
+                       if (logical[nr] > cache->start + cache->length)
                                continue;
 
-                       if (logical[nr] + stripe_len <= cache->key.objectid)
+                       if (logical[nr] + stripe_len <= cache->start)
                                continue;
 
                        start = logical[nr];
-                       if (start < cache->key.objectid) {
-                               start = cache->key.objectid;
+                       if (start < cache->start) {
+                               start = cache->start;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
-                                           cache->key.objectid +
-                                           cache->key.offset - start);
+                                           cache->start + cache->length - start);
                        }
 
                        cache->bytes_super += len;
                return NULL;
        }
 
-       cache->key.objectid = start;
-       cache->key.offset = size;
-       cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+       cache->start = start;
+       cache->length = size;
 
        cache->fs_info = fs_info;
        cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
                        free_extent_map(em);
                        break;
                }
-               if (bg->key.objectid != em->start ||
-                   bg->key.offset != em->len ||
+               if (bg->start != em->start || bg->length != em->len ||
                    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
                    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
                        btrfs_err(fs_info,
 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
                                em->start, em->len,
                                em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
-                               bg->key.objectid, bg->key.offset,
+                               bg->start, bg->length,
                                bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
                        ret = -EUCLEAN;
                        free_extent_map(em);
                    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
                        btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
-                                 cache->key.objectid);
+                                 cache->start);
                        btrfs_put_block_group(cache);
                        ret = -EINVAL;
                        goto error;
                link_block_group(cache);
 
                set_avail_alloc_bits(info, cache->flags);
-               if (btrfs_chunk_readonly(info, cache->key.objectid)) {
+               if (btrfs_chunk_readonly(info, cache->start)) {
                        inc_block_group_ro(cache, 1);
                } else if (cache->used == 0) {
                        ASSERT(list_empty(&cache->bg_list));
                btrfs_set_stack_block_group_chunk_objectid(&item,
                                BTRFS_FIRST_CHUNK_TREE_OBJECTID);
                btrfs_set_stack_block_group_flags(&item, block_group->flags);
-               memcpy(&key, &block_group->key, sizeof(key));
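+               /* Key of the block group item to be inserted for this group */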
+               key.objectid = block_group->start;
+               key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+               key.offset = block_group->length;
                spin_unlock(&block_group->lock);
 
                ret = btrfs_insert_item(trans, extent_root, &key, &item,
        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);
        if (!--cache->ro) {
-               num_bytes = cache->key.offset - cache->reserved -
+               num_bytes = cache->length - cache->reserved -
                            cache->pinned - cache->bytes_super - cache->used;
                sinfo->bytes_readonly -= num_bytes;
                list_del_init(&cache->ro_list);
        unsigned long bi;
        struct extent_buffer *leaf;
        struct btrfs_block_group_item bgi;
+       struct btrfs_key key;
+
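+       /* The key identifies the block group item to update */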
+       key.objectid = cache->start;
+       key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+       key.offset = cache->length;
 
-       ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
+       ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
         * If this block group is smaller than 100 megs don't bother caching the
         * block group.
         */
-       if (block_group->key.offset < (100 * SZ_1M)) {
+       if (block_group->length < (100 * SZ_1M)) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
         * taking up quite a bit since it's not folded into the other space
         * cache.
         */
-       num_pages = div_u64(block_group->key.offset, SZ_256M);
+       num_pages = div_u64(block_group->length, SZ_256M);
        if (!num_pages)
                num_pages = 1;
 
                if (!alloc && !btrfs_block_group_cache_done(cache))
                        btrfs_cache_block_group(cache, 1);
 
-               byte_in_group = bytenr - cache->key.objectid;
-               WARN_ON(byte_in_group > cache->key.offset);
+               byte_in_group = bytenr - cache->start;
+               WARN_ON(byte_in_group > cache->length);
 
                spin_lock(&cache->space_info->lock);
                spin_lock(&cache->lock);
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
 
                old_val = cache->used;
-               num_bytes = min(total, cache->key.offset - byte_in_group);
+               num_bytes = min(total, cache->length - byte_in_group);
                if (alloc) {
                        old_val += num_bytes;
                        cache->used = old_val;
                spin_unlock(&block_group->lock);
                ASSERT(block_group->io_ctl.inode == NULL);
                iput(inode);
-               last = block_group->key.objectid + block_group->key.offset;
+               last = block_group->start + block_group->length;
                btrfs_put_block_group(block_group);
        }
 }
 
 #define CACHING_CTL_WAKE_UP SZ_2M
 
 struct btrfs_block_group_cache {
-       struct btrfs_key key;
        struct btrfs_fs_info *fs_info;
        struct inode *inode;
        spinlock_t lock;
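+       /* Logical start and size, in bytes, of the block group */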
+       u64 start;
+       u64 length;
        u64 pinned;
        u64 reserved;
        u64 used;
 
        struct btrfs_fs_info *fs_info = cache->fs_info;
        u64 start, end;
 
-       start = cache->key.objectid;
-       end = start + cache->key.offset - 1;
+       start = cache->start;
+       end = start + cache->length - 1;
 
        clear_extent_bits(&fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE);
        if (!cache)
                return 0;
 
-       bytenr = cache->key.objectid;
+       bytenr = cache->start;
        btrfs_put_block_group(cache);
 
        return bytenr;
        while (start <= end) {
                readonly = false;
                if (!cache ||
-                   start >= cache->key.objectid + cache->key.offset) {
+                   start >= cache->start + cache->length) {
                        if (cache)
                                btrfs_put_block_group(cache);
                        total_unpinned = 0;
                        empty_cluster <<= 1;
                }
 
-               len = cache->key.objectid + cache->key.offset - start;
+               len = cache->start + cache->length - start;
                len = min(len, end + 1 - start);
 
                if (start < cache->last_byte_to_unpin) {
                ret = -EROFS;
                if (!trans->aborted)
                        ret = btrfs_discard_extent(fs_info,
-                                                  block_group->key.objectid,
-                                                  block_group->key.offset,
+                                                  block_group->start,
+                                                  block_group->length,
                                                   &trimmed);
 
                list_del_init(&block_group->bg_list);
                goto release_cluster;
 
        offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
-                       ffe_ctl->num_bytes, cluster_bg->key.objectid,
+                       ffe_ctl->num_bytes, cluster_bg->start,
                        &ffe_ctl->max_extent_size);
        if (offset) {
                /* We have a block, we're done */
                        continue;
 
                btrfs_grab_block_group(block_group, delalloc);
-               ffe_ctl.search_start = block_group->key.objectid;
+               ffe_ctl.search_start = block_group->start;
 
                /*
                 * this can happen if we end up cycling through all the
 
                /* move on to the next group */
                if (ffe_ctl.search_start + num_bytes >
-                   block_group->key.objectid + block_group->key.offset) {
+                   block_group->start + block_group->length) {
                        btrfs_add_free_space(block_group, ffe_ctl.found_offset,
                                             num_bytes);
                        goto loop;
                }
 
                factor = btrfs_bg_type_to_factor(block_group->flags);
-               free_bytes += (block_group->key.offset -
+               free_bytes += (block_group->length -
                               block_group->used) * factor;
 
                spin_unlock(&block_group->lock);
 
        cache = btrfs_lookup_first_block_group(fs_info, range->start);
        for (; cache; cache = btrfs_next_block_group(cache)) {
-               if (cache->key.objectid >= range_end) {
+               if (cache->start >= range_end) {
                        btrfs_put_block_group(cache);
                        break;
                }
 
-               start = max(range->start, cache->key.objectid);
-               end = min(range_end, cache->key.objectid + cache->key.offset);
+               start = max(range->start, cache->start);
+               end = min(range_end, cache->start + cache->length);
 
                if (end - start >= range->minlen) {
                        if (!btrfs_block_group_cache_done(cache)) {
 
                return inode;
 
        inode = __lookup_free_space_inode(fs_info->tree_root, path,
-                                         block_group->key.objectid);
+                                         block_group->start);
        if (IS_ERR(inode))
                return inode;
 
                return ret;
 
        return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
-                                        ino, block_group->key.objectid);
+                                        ino, block_group->start);
 }
 
 int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
        spin_unlock(&block_group->lock);
 
        ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
-                                     path, block_group->key.objectid);
+                                     path, block_group->start);
        btrfs_free_path(path);
        if (ret <= 0)
                goto out;
 
        spin_lock(&ctl->tree_lock);
-       matched = (ctl->free_space == (block_group->key.offset - used -
+       matched = (ctl->free_space == (block_group->length - used -
                                       block_group->bytes_super));
        spin_unlock(&ctl->tree_lock);
 
                __btrfs_remove_free_space_cache(ctl);
                btrfs_warn(fs_info,
                           "block group %llu has wrong amount of free space",
-                          block_group->key.objectid);
+                          block_group->start);
                ret = -1;
        }
 out:
 
                btrfs_warn(fs_info,
                           "failed to load free space cache for block group %llu, rebuilding it now",
-                          block_group->key.objectid);
+                          block_group->start);
        }
 
        iput(inode);
         */
        unpin = block_group->fs_info->pinned_extents;
 
-       start = block_group->key.objectid;
+       start = block_group->start;
 
-       while (start < block_group->key.objectid + block_group->key.offset) {
+       while (start < block_group->start + block_group->length) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY, NULL);
                        return 0;
 
                /* This pinned extent is out of our range */
-               if (extent_start >= block_group->key.objectid +
-                   block_group->key.offset)
+               if (extent_start >= block_group->start + block_group->length)
                        return 0;
 
                extent_start = max(extent_start, start);
-               extent_end = min(block_group->key.objectid +
-                                block_group->key.offset, extent_end + 1);
+               extent_end = min(block_group->start + block_group->length,
+                                extent_end + 1);
                len = extent_end - extent_start;
 
                *entries += 1;
 #ifdef DEBUG
                        btrfs_err(root->fs_info,
                                  "failed to write free space cache for block group %llu",
-                                 block_group->key.objectid);
+                                 block_group->start);
 #endif
                }
        }
 {
        return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
                                     block_group, &block_group->io_ctl,
-                                    path, block_group->key.objectid);
+                                    path, block_group->start);
 }
 
 /**
 #ifdef DEBUG
                btrfs_err(fs_info,
                          "failed to write free space cache for block group %llu",
-                         block_group->key.objectid);
+                         block_group->start);
 #endif
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_ERROR;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
-       u64 size = block_group->key.offset;
+       u64 size = block_group->length;
        u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
        u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
         * so allow those block groups to still be allowed to have a bitmap
         * entry.
         */
-       if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
+       if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
                return false;
 
        return true;
 
        spin_lock_init(&ctl->tree_lock);
        ctl->unit = fs_info->sectorsize;
-       ctl->start = block_group->key.objectid;
+       ctl->start = block_group->start;
        ctl->private = block_group;
        ctl->op = &free_space_op;
        INIT_LIST_HEAD(&ctl->trimming_ranges);
                mutex_lock(&fs_info->chunk_mutex);
                em_tree = &fs_info->mapping_tree;
                write_lock(&em_tree->lock);
-               em = lookup_extent_mapping(em_tree, block_group->key.objectid,
-                                          1);
+               em = lookup_extent_mapping(em_tree, block_group->start, 1);
                BUG_ON(!em); /* logic error, can't happen */
                remove_extent_mapping(em_tree, em);
 
         * exceeds that required for using bitmaps.
         */
        bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
-       num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
-                             bitmap_range);
+       num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range);
        bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
        total_bitmap_size = num_bitmaps * bitmap_size;
        cache->bitmap_high_thresh = div_u64(total_bitmap_size,
        struct extent_buffer *leaf;
        int ret;
 
-       key.objectid = block_group->key.objectid;
+       key.objectid = block_group->start;
        key.type = BTRFS_FREE_SPACE_INFO_KEY;
-       key.offset = block_group->key.offset;
+       key.offset = block_group->length;
 
        ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
        if (ret)
        struct btrfs_key key;
        int ret;
 
-       key.objectid = block_group->key.objectid;
+       key.objectid = block_group->start;
        key.type = BTRFS_FREE_SPACE_INFO_KEY;
-       key.offset = block_group->key.offset;
+       key.offset = block_group->length;
 
        ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
        if (ret < 0)
                return ERR_PTR(ret);
        if (ret != 0) {
                btrfs_warn(fs_info, "missing free space info for %llu",
-                          block_group->key.objectid);
+                          block_group->start);
                ASSERT(0);
                return ERR_PTR(-ENOENT);
        }
        int done = 0, nr;
        int ret;
 
-       bitmap_size = free_space_bitmap_size(block_group->key.offset,
+       bitmap_size = free_space_bitmap_size(block_group->length,
                                             fs_info->sectorsize);
        bitmap = alloc_bitmap(bitmap_size);
        if (!bitmap) {
                goto out;
        }
 
-       start = block_group->key.objectid;
-       end = block_group->key.objectid + block_group->key.offset;
+       start = block_group->start;
+       end = block_group->start + block_group->length;
 
        key.objectid = end - 1;
        key.type = (u8)-1;
                        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 
                        if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
-                               ASSERT(found_key.objectid == block_group->key.objectid);
-                               ASSERT(found_key.offset == block_group->key.offset);
+                               ASSERT(found_key.objectid == block_group->start);
+                               ASSERT(found_key.offset == block_group->length);
                                done = 1;
                                break;
                        } else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
        if (extent_count != expected_extent_count) {
                btrfs_err(fs_info,
                          "incorrect extent count for %llu; counted %u, expected %u",
-                         block_group->key.objectid, extent_count,
+                         block_group->start, extent_count,
                          expected_extent_count);
                ASSERT(0);
                ret = -EIO;
        int done = 0, nr;
        int ret;
 
-       bitmap_size = free_space_bitmap_size(block_group->key.offset,
+       bitmap_size = free_space_bitmap_size(block_group->length,
                                             fs_info->sectorsize);
        bitmap = alloc_bitmap(bitmap_size);
        if (!bitmap) {
                goto out;
        }
 
-       start = block_group->key.objectid;
-       end = block_group->key.objectid + block_group->key.offset;
+       start = block_group->start;
+       end = block_group->start + block_group->length;
 
        key.objectid = end - 1;
        key.type = (u8)-1;
                        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 
                        if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
-                               ASSERT(found_key.objectid == block_group->key.objectid);
-                               ASSERT(found_key.offset == block_group->key.offset);
+                               ASSERT(found_key.objectid == block_group->start);
+                               ASSERT(found_key.offset == block_group->length);
                                done = 1;
                                break;
                        } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
 
-       nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
+       nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize);
        start_bit = find_next_bit_le(bitmap, nrbits, 0);
 
        while (start_bit < nrbits) {
        if (extent_count != expected_extent_count) {
                btrfs_err(fs_info,
                          "incorrect extent count for %llu; counted %u, expected %u",
-                         block_group->key.objectid, extent_count,
+                         block_group->start, extent_count,
                          expected_extent_count);
                ASSERT(0);
                ret = -EIO;
         * Read the bit for the block immediately before the extent of space if
         * that block is within the block group.
         */
-       if (start > block_group->key.objectid) {
+       if (start > block_group->start) {
                u64 prev_block = start - block_group->fs_info->sectorsize;
 
                key.objectid = prev_block;
         * Read the bit for the block immediately after the extent of space if
         * that block is within the block group.
         */
-       if (end < block_group->key.objectid + block_group->key.offset) {
+       if (end < block_group->start + block_group->length) {
                /* The next block may be in the next bitmap. */
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (end >= key.objectid + key.offset) {
        new_key.offset = size;
 
        /* Search for a neighbor on the left. */
-       if (start == block_group->key.objectid)
+       if (start == block_group->start)
                goto right;
        key.objectid = start - 1;
        key.type = (u8)-1;
 
        found_start = key.objectid;
        found_end = key.objectid + key.offset;
-       ASSERT(found_start >= block_group->key.objectid &&
-              found_end > block_group->key.objectid);
+       ASSERT(found_start >= block_group->start &&
+              found_end > block_group->start);
        ASSERT(found_start < start && found_end <= start);
 
        /*
 
 right:
        /* Search for a neighbor on the right. */
-       if (end == block_group->key.objectid + block_group->key.offset)
+       if (end == block_group->start + block_group->length)
                goto insert;
        key.objectid = end;
        key.type = (u8)-1;
 
        found_start = key.objectid;
        found_end = key.objectid + key.offset;
-       ASSERT(found_start >= block_group->key.objectid &&
-              found_end > block_group->key.objectid);
+       ASSERT(found_start >= block_group->start &&
+              found_end > block_group->start);
        ASSERT((found_start < start && found_end <= start) ||
               (found_start >= end && found_end > end));
 
         * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
         * contained in.
         */
-       key.objectid = block_group->key.objectid;
+       key.objectid = block_group->start;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = 0;
 
                goto out_locked;
        ASSERT(ret == 0);
 
-       start = block_group->key.objectid;
-       end = block_group->key.objectid + block_group->key.offset;
+       start = block_group->start;
+       end = block_group->start + block_group->length;
        while (1) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 
                        else
                                start += key.offset;
                } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
-                       if (key.objectid != block_group->key.objectid)
+                       if (key.objectid != block_group->start)
                                break;
                }
 
                return ret;
 
        return __add_to_free_space_tree(trans, block_group, path,
-                                       block_group->key.objectid,
-                                       block_group->key.offset);
+                                       block_group->start,
+                                       block_group->length);
 }
 
 int add_block_group_free_space(struct btrfs_trans_handle *trans,
                goto out;
        }
 
-       start = block_group->key.objectid;
-       end = block_group->key.objectid + block_group->key.offset;
+       start = block_group->start;
+       end = block_group->start + block_group->length;
 
        key.objectid = end - 1;
        key.type = (u8)-1;
                        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
 
                        if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
-                               ASSERT(found_key.objectid == block_group->key.objectid);
-                               ASSERT(found_key.offset == block_group->key.offset);
+                               ASSERT(found_key.objectid == block_group->start);
+                               ASSERT(found_key.offset == block_group->length);
                                done = 1;
                                nr++;
                                path->slots[0]--;
        fs_info = block_group->fs_info;
        root = fs_info->free_space_root;
 
-       end = block_group->key.objectid + block_group->key.offset;
+       end = block_group->start + block_group->length;
 
        while (1) {
                ret = btrfs_next_item(root, path);
        if (extent_count != expected_extent_count) {
                btrfs_err(fs_info,
                          "incorrect extent count for %llu; counted %u, expected %u",
-                         block_group->key.objectid, extent_count,
+                         block_group->start, extent_count,
                          expected_extent_count);
                ASSERT(0);
                ret = -EIO;
        fs_info = block_group->fs_info;
        root = fs_info->free_space_root;
 
-       end = block_group->key.objectid + block_group->key.offset;
+       end = block_group->start + block_group->length;
 
        while (1) {
                ret = btrfs_next_item(root, path);
        if (extent_count != expected_extent_count) {
                btrfs_err(fs_info,
                          "incorrect extent count for %llu; counted %u, expected %u",
-                         block_group->key.objectid, extent_count,
+                         block_group->start, extent_count,
                          expected_extent_count);
                ASSERT(0);
                ret = -EIO;
 
        space->flags = 0;
        list_for_each_entry(block_group, groups_list, list) {
                space->flags = block_group->flags;
-               space->total_bytes += block_group->key.offset;
+               space->total_bytes += block_group->length;
                space->used_bytes += block_group->used;
        }
 }
 
        if (!cache)
                return NULL;
 
-       start = cache->key.objectid;
-       end = start + cache->key.offset - 1;
+       start = cache->start;
+       end = start + cache->length - 1;
        btrfs_put_block_group(cache);
 
        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
 
 static int in_block_group(u64 bytenr,
                          struct btrfs_block_group_cache *block_group)
 {
-       if (bytenr >= block_group->key.objectid &&
-           bytenr < block_group->key.objectid + block_group->key.offset)
+       if (bytenr >= block_group->start &&
+           bytenr < block_group->start + block_group->length)
                return 1;
        return 0;
 }
        u64 start, end, last;
        int ret;
 
-       last = rc->block_group->key.objectid + rc->block_group->key.offset;
+       last = rc->block_group->start + rc->block_group->length;
        while (1) {
                cond_resched();
                if (rc->search_start >= last) {
                return -ENOMEM;
 
        memset(&rc->cluster, 0, sizeof(rc->cluster));
-       rc->search_start = rc->block_group->key.objectid;
+       rc->search_start = rc->block_group->start;
        rc->extents_found = 0;
        rc->nodes_relocated = 0;
        rc->merging_rsv_size = 0;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, root);
        BUG_ON(IS_ERR(inode));
-       BTRFS_I(inode)->index_cnt = group->key.objectid;
+       BTRFS_I(inode)->index_cnt = group->start;
 
        err = btrfs_orphan_add(trans, BTRFS_I(inode));
 out:
 
        btrfs_info(fs_info,
                   "relocating block group %llu flags %s",
-                  block_group->key.objectid, buf);
+                  block_group->start, buf);
 }
 
 /*
        btrfs_wait_block_group_reservations(rc->block_group);
        btrfs_wait_nocow_writers(rc->block_group);
        btrfs_wait_ordered_roots(fs_info, U64_MAX,
-                                rc->block_group->key.objectid,
-                                rc->block_group->key.offset);
+                                rc->block_group->start,
+                                rc->block_group->length);
 
        while (1) {
                mutex_lock(&fs_info->cleaner_mutex);
 
         * round_down() can only handle power of 2, while RAID56 full
         * stripe length can be 64KiB * n, so we need to manually round down.
         */
-       ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
-               cache->full_stripe_len + cache->key.objectid;
+       ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
+                       cache->full_stripe_len + cache->start;
        return ret;
 }
 
                        btrfs_wait_block_group_reservations(cache);
                        btrfs_wait_nocow_writers(cache);
                        ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
-                                                      cache->key.objectid,
-                                                      cache->key.offset);
+                                                      cache->start,
+                                                      cache->length);
                        if (ret > 0) {
                                struct btrfs_trans_handle *trans;
 
 
                spin_lock(&cache->lock);
                btrfs_info(fs_info,
                        "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
-                       cache->key.objectid, cache->key.offset,
-                       cache->used, cache->pinned,
+                       cache->start, cache->length, cache->used, cache->pinned,
                        cache->reserved, cache->ro ? "[readonly]" : "");
                btrfs_dump_free_space(cache, bytes);
                spin_unlock(&cache->lock);
 
        down_read(&sinfo->groups_sem);
        list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
                if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
-                       val += block_group->key.offset;
+                       val += block_group->length;
                else
                        val += block_group->used;
        }
 
                return NULL;
        }
 
-       cache->key.objectid = 0;
-       cache->key.offset = length;
-       cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
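+       /* Dummy block group for the self-tests, spanning [0, length) */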
+       cache->start = 0;
+       cache->length = length;
        cache->full_stripe_len = fs_info->sectorsize;
        cache->fs_info = fs_info;
 
 
        if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
                if (path->slots[0] != 0)
                        goto invalid;
-               end = cache->key.objectid + cache->key.offset;
+               end = cache->start + cache->length;
                i = 0;
                while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
                        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                                  u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid, cache->key.offset},
+               {cache->start, cache->length},
        };
 
        return check_free_space_extents(trans, fs_info, cache, path,
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid,
-                                           cache->key.offset);
+                                           cache->start, cache->length);
        if (ret) {
                test_err("could not remove free space");
                return ret;
                                 u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid + alignment,
-                       cache->key.offset - alignment},
+               {cache->start + alignment, cache->length - alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid, alignment);
+                                           cache->start, alignment);
        if (ret) {
                test_err("could not remove free space");
                return ret;
                           u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid, cache->key.offset - alignment},
+               {cache->start, cache->length - alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid +
-                                           cache->key.offset - alignment,
-                                           alignment);
+                                   cache->start + cache->length - alignment,
+                                   alignment);
        if (ret) {
                test_err("could not remove free space");
                return ret;
                              u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid, alignment},
-               {cache->key.objectid + 2 * alignment,
-                       cache->key.offset - 2 * alignment},
+               {cache->start, alignment},
+               {cache->start + 2 * alignment, cache->length - 2 * alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid + alignment,
+                                           cache->start + alignment,
                                            alignment);
        if (ret) {
                test_err("could not remove free space");
                           u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid, 2 * alignment},
+               {cache->start, 2 * alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid,
-                                           cache->key.offset);
+                                           cache->start, cache->length);
        if (ret) {
                test_err("could not remove free space");
                return ret;
        }
 
-       ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+       ret = __add_to_free_space_tree(trans, cache, path, cache->start,
                                       alignment);
        if (ret) {
                test_err("could not add free space");
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + alignment,
+                                      cache->start + alignment,
                                       alignment);
        if (ret) {
                test_err("could not add free space");
                           u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid + alignment, 2 * alignment},
+               {cache->start + alignment, 2 * alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid,
-                                           cache->key.offset);
+                                           cache->start, cache->length);
        if (ret) {
                test_err("could not remove free space");
                return ret;
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + 2 * alignment,
+                                      cache->start + 2 * alignment,
                                       alignment);
        if (ret) {
                test_err("could not add free space");
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + alignment,
+                                      cache->start + alignment,
                                       alignment);
        if (ret) {
                test_err("could not add free space");
                           u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid, 3 * alignment},
+               {cache->start, 3 * alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid,
-                                           cache->key.offset);
+                                           cache->start, cache->length);
        if (ret) {
                test_err("could not remove free space");
                return ret;
        }
 
-       ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+       ret = __add_to_free_space_tree(trans, cache, path, cache->start,
                                       alignment);
        if (ret) {
                test_err("could not add free space");
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + 2 * alignment,
-                                      alignment);
+                                      cache->start + 2 * alignment, alignment);
        if (ret) {
                test_err("could not add free space");
                return ret;
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + alignment,
-                                      alignment);
+                                      cache->start + alignment, alignment);
        if (ret) {
                test_err("could not add free space");
                return ret;
                           u32 alignment)
 {
        const struct free_space_extent extents[] = {
-               {cache->key.objectid, alignment},
-               {cache->key.objectid + 2 * alignment, alignment},
-               {cache->key.objectid + 4 * alignment, alignment},
+               {cache->start, alignment},
+               {cache->start + 2 * alignment, alignment},
+               {cache->start + 4 * alignment, alignment},
        };
        int ret;
 
        ret = __remove_from_free_space_tree(trans, cache, path,
-                                           cache->key.objectid,
-                                           cache->key.offset);
+                                           cache->start, cache->length);
        if (ret) {
                test_err("could not remove free space");
                return ret;
        }
 
-       ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+       ret = __add_to_free_space_tree(trans, cache, path, cache->start,
                                       alignment);
        if (ret) {
                test_err("could not add free space");
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + 4 * alignment,
-                                      alignment);
+                                      cache->start + 4 * alignment, alignment);
        if (ret) {
                test_err("could not add free space");
                return ret;
        }
 
        ret = __add_to_free_space_tree(trans, cache, path,
-                                      cache->key.objectid + 2 * alignment,
-                                      alignment);
+                                      cache->start + 2 * alignment, alignment);
        if (ret) {
                test_err("could not add free space");
                return ret;
 
        if (bargs->usage_min == 0)
                user_thresh_min = 0;
        else
-               user_thresh_min = div_factor_fine(cache->key.offset,
-                                       bargs->usage_min);
+               user_thresh_min = div_factor_fine(cache->length,
+                                                 bargs->usage_min);
 
        if (bargs->usage_max == 0)
                user_thresh_max = 1;
        else if (bargs->usage_max > 100)
-               user_thresh_max = cache->key.offset;
+               user_thresh_max = cache->length;
        else
-               user_thresh_max = div_factor_fine(cache->key.offset,
-                                       bargs->usage_max);
+               user_thresh_max = div_factor_fine(cache->length,
+                                                 bargs->usage_max);
 
        if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
                ret = 0;
        if (bargs->usage_min == 0)
                user_thresh = 1;
        else if (bargs->usage > 100)
-               user_thresh = cache->key.offset;
+               user_thresh = cache->length;
        else
-               user_thresh = div_factor_fine(cache->key.offset,
-                                             bargs->usage);
+               user_thresh = div_factor_fine(cache->length, bargs->usage);
 
        if (chunk_used < user_thresh)
                ret = 0;
 
        ),
 
        TP_fast_assign_btrfs(fs_info,
-               __entry->offset         = block_group->key.objectid;
-               __entry->size           = block_group->key.offset;
+               __entry->offset         = block_group->start;
+               __entry->size           = block_group->length;
                __entry->flags          = block_group->flags;
                __entry->bytes_used     = block_group->used;
                __entry->bytes_super    = block_group->bytes_super;
        ),
 
        TP_fast_assign_btrfs(block_group->fs_info,
-               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->bg_objectid    = block_group->start;
                __entry->flags          = block_group->flags;
                __entry->start          = start;
                __entry->len            = len;
        ),
 
        TP_fast_assign_btrfs(block_group->fs_info,
-               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->bg_objectid    = block_group->start;
                __entry->flags          = block_group->flags;
                __entry->start          = start;
                __entry->bytes          = bytes;
        ),
 
        TP_fast_assign_btrfs(block_group->fs_info,
-               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->bg_objectid    = block_group->start;
        ),
 
        TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
        ),
 
        TP_fast_assign_btrfs(block_group->fs_info,
-               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->bg_objectid    = block_group->start;
                __entry->flags          = block_group->flags;
                __entry->start          = cluster->window_start;
                __entry->max_size       = cluster->max_size;
        ),
 
        TP_fast_assign_btrfs(bg_cache->fs_info,
-               __entry->bytenr = bg_cache->key.objectid,
-               __entry->len    = bg_cache->key.offset,
+               __entry->bytenr = bg_cache->start;
+               __entry->len    = bg_cache->length;
                __entry->used   = bg_cache->used;
                __entry->flags  = bg_cache->flags;
        ),