WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
-       cache->has_caching_ctl = 1;
+       set_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &cache->runtime_flags);
        spin_unlock(&cache->lock);
 
        write_lock(&fs_info->block_group_cache_lock);
                kobject_put(kobj);
        }
 
-       if (block_group->has_caching_ctl)
+       if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags))
                caching_ctl = btrfs_get_caching_control(block_group);
        if (block_group->cached == BTRFS_CACHE_STARTED)
                btrfs_wait_block_group_cache_done(block_group);
-       if (block_group->has_caching_ctl) {
+       if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags)) {
                write_lock(&fs_info->block_group_cache_lock);
                if (!caching_ctl) {
                        struct btrfs_caching_control *ctl;
                        < block_group->zone_unusable);
                WARN_ON(block_group->space_info->disk_total
                        < block_group->length * factor);
-               WARN_ON(block_group->zone_is_active &&
+               WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+                                &block_group->runtime_flags) &&
                        block_group->space_info->active_total_bytes
                        < block_group->length);
        }
        block_group->space_info->total_bytes -= block_group->length;
-       if (block_group->zone_is_active)
+       if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
                block_group->space_info->active_total_bytes -= block_group->length;
        block_group->space_info->bytes_readonly -=
                (block_group->length - block_group->zone_unusable);
                goto out;
 
        spin_lock(&block_group->lock);
-       block_group->removed = 1;
+       set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
+
        /*
         * At this point trimming or scrub can't start on this block group,
         * because we removed the block group from the rbtree
                ret = insert_block_group_item(trans, block_group);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
-               if (!block_group->chunk_item_inserted) {
+               if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+                             &block_group->runtime_flags)) {
                        mutex_lock(&fs_info->chunk_mutex);
                        ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
                        mutex_unlock(&fs_info->chunk_mutex);
                while (block_group) {
                        btrfs_wait_block_group_cache_done(block_group);
                        spin_lock(&block_group->lock);
-                       if (block_group->iref)
+                       if (test_bit(BLOCK_GROUP_FLAG_IREF,
+                                    &block_group->runtime_flags))
                                break;
                        spin_unlock(&block_group->lock);
                        block_group = btrfs_next_block_group(block_group);
                }
 
                inode = block_group->inode;
-               block_group->iref = 0;
+               clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags);
                block_group->inode = NULL;
                spin_unlock(&block_group->lock);
                ASSERT(block_group->io_ctl.inode == NULL);
 
        spin_lock(&block_group->lock);
        cleanup = (atomic_dec_and_test(&block_group->frozen) &&
-                  block_group->removed);
+                  test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
        spin_unlock(&block_group->lock);
 
        if (cleanup) {
 
        CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
+/* Block group flags set at runtime */
+enum btrfs_block_group_flags {
+       BLOCK_GROUP_FLAG_IREF,
+       BLOCK_GROUP_FLAG_HAS_CACHING_CTL,
+       BLOCK_GROUP_FLAG_REMOVED,
+       BLOCK_GROUP_FLAG_TO_COPY,
+       BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
+       BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+       BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+       BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+};
+
 struct btrfs_caching_control {
        struct list_head list;
        struct mutex mutex;
 
        /* For raid56, this is a full stripe, without parity */
        unsigned long full_stripe_len;
+       unsigned long runtime_flags;
 
        unsigned int ro;
-       unsigned int iref:1;
-       unsigned int has_caching_ctl:1;
-       unsigned int removed:1;
-       unsigned int to_copy:1;
-       unsigned int relocating_repair:1;
-       unsigned int chunk_item_inserted:1;
-       unsigned int zone_is_active:1;
-       unsigned int zoned_data_reloc_ongoing:1;
 
        int disk_cache_state;
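
Every hunk in this patch follows the same substitution: a former ":1" bitfield becomes one value in the enum above, and the flag is then read and written through the kernel's bit helpers (set_bit(), clear_bit(), test_bit() from <linux/bitops.h>) acting on the new runtime_flags word. A minimal sketch of that pattern, using placeholder names rather than the btrfs structures:

#include <linux/bitops.h>

/* Illustrative only; stands in for struct btrfs_block_group. */
enum example_runtime_flags {
        EXAMPLE_FLAG_REMOVED,           /* one enum value per former bitfield */
        EXAMPLE_FLAG_HAS_CTL,
};

struct example_group {
        unsigned long runtime_flags;    /* replaces "unsigned int removed:1;" etc. */
};

static void example_usage(struct example_group *grp)
{
        set_bit(EXAMPLE_FLAG_REMOVED, &grp->runtime_flags);      /* was: grp->removed = 1; */
        if (test_bit(EXAMPLE_FLAG_REMOVED, &grp->runtime_flags)) /* was: if (grp->removed) */
                clear_bit(EXAMPLE_FLAG_REMOVED, &grp->runtime_flags);
}

Unlike writes to adjacent bitfields, set_bit() and clear_bit() are atomic read-modify-write operations on the flag word, so different bits can be flipped concurrently without clobbering each other, which is presumably the point of the conversion; the spinlocks kept around the call sites above still serialize each flag against the other fields those locks protect.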
 
 
                        continue;
 
                spin_lock(&cache->lock);
-               cache->to_copy = 1;
+               set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
                spin_unlock(&cache->lock);
 
                btrfs_put_block_group(cache);
                return true;
 
        spin_lock(&cache->lock);
-       if (cache->removed) {
+       if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
                spin_unlock(&cache->lock);
                return true;
        }
 
        /* Last stripe on this device */
        spin_lock(&cache->lock);
-       cache->to_copy = 0;
+       clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
        spin_unlock(&cache->lock);
 
        return true;
 
               block_group->start == fs_info->data_reloc_bg ||
               fs_info->data_reloc_bg == 0);
 
-       if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
+       if (block_group->ro ||
+           test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
                ret = 1;
                goto out;
        }
                 * regular extents) at the same time to the same zone, which
                 * easily break the write pointer.
                 */
-               block_group->zoned_data_reloc_ongoing = 1;
+               set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
                fs_info->data_reloc_bg = 0;
        }
        spin_unlock(&fs_info->relocation_bg_lock);
 
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
        }
 
-       if (!block_group->iref) {
+       if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
                block_group->inode = igrab(inode);
-               block_group->iref = 1;
-       }
        spin_unlock(&block_group->lock);
 
        return inode;
        clear_nlink(inode);
        /* One for the block groups ref */
        spin_lock(&block_group->lock);
-       if (block_group->iref) {
-               block_group->iref = 0;
+       if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
                block_group->inode = NULL;
                spin_unlock(&block_group->lock);
                iput(inode);
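
Where the old code tested a bitfield and then assigned it in the same critical section, the patch collapses the pair into a single test_and_set_bit() or test_and_clear_bit(), which flips the bit and returns its previous value. A sketch of that shape with placeholder names (the hunks above do this for BLOCK_GROUP_FLAG_IREF while holding block_group->lock):

#include <linux/bitops.h>

#define EXAMPLE_FLAG_IREF       0

struct example_holder {
        unsigned long runtime_flags;
        int refs;                       /* stand-in for the cached inode reference */
};

static void example_grab(struct example_holder *h)
{
        /* was: if (!h->iref) { h->iref = 1; h->refs++; } */
        if (!test_and_set_bit(EXAMPLE_FLAG_IREF, &h->runtime_flags))
                h->refs++;
}

static void example_release(struct example_holder *h)
{
        /* was: if (h->iref) { h->iref = 0; h->refs--; } */
        if (test_and_clear_bit(EXAMPLE_FLAG_IREF, &h->runtime_flags))
                h->refs--;
}
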
        if (btrfs_is_zoned(fs_info)) {
                btrfs_info(fs_info, "free space %llu active %d",
                           block_group->zone_capacity - block_group->alloc_offset,
-                          block_group->zone_is_active);
+                          test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+                                   &block_group->runtime_flags));
                return;
        }
 
        *trimmed = 0;
 
        spin_lock(&block_group->lock);
-       if (block_group->removed) {
+       if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        *trimmed = 0;
 
        spin_lock(&block_group->lock);
-       if (block_group->removed) {
+       if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        *trimmed = 0;
 
        spin_lock(&block_group->lock);
-       if (block_group->removed) {
+       if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
                spin_unlock(&block_group->lock);
                return 0;
        }
 
                }
                /* Block group removed? */
                spin_lock(&bg->lock);
-               if (bg->removed) {
+               if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
                        spin_unlock(&bg->lock);
                        ret = 0;
                        break;
                 * kthread or relocation.
                 */
                spin_lock(&bg->lock);
-               if (!bg->removed)
+               if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
                        ret = -EINVAL;
                spin_unlock(&bg->lock);
 
 
                if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
                        spin_lock(&cache->lock);
-                       if (!cache->to_copy) {
+                       if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto skip;
                 * repair extents.
                 */
                spin_lock(&cache->lock);
-               if (cache->removed) {
+               if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
                        spin_unlock(&cache->lock);
                        btrfs_put_block_group(cache);
                        goto skip;
                 * balance is triggered or it becomes used and unused again.
                 */
                spin_lock(&cache->lock);
-               if (!cache->removed && !cache->ro && cache->reserved == 0 &&
-                   cache->used == 0) {
+               if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
+                   !cache->ro && cache->reserved == 0 && cache->used == 0) {
                        spin_unlock(&cache->lock);
                        if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
                                btrfs_discard_queue_work(&fs_info->discard_ctl,
 
        ASSERT(found);
        spin_lock(&found->lock);
        found->total_bytes += block_group->length;
-       if (block_group->zone_is_active)
+       if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
                found->active_total_bytes += block_group->length;
        found->disk_total += block_group->length * factor;
        found->bytes_used += block_group->used;
 
        if (ret)
                goto out;
 
-       bg->chunk_item_inserted = 1;
+       set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
 
        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
        cache = btrfs_lookup_block_group(fs_info, logical);
 
        spin_lock(&cache->lock);
-       ret = cache->to_copy;
+       ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
        spin_unlock(&cache->lock);
 
        btrfs_put_block_group(cache);
        if (!cache)
                goto out;
 
-       if (!cache->relocating_repair)
+       if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
                goto out;
 
        ret = btrfs_may_alloc_data_chunk(fs_info, target);
                return true;
 
        spin_lock(&cache->lock);
-       if (cache->relocating_repair) {
+       if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                return true;
        }
-       cache->relocating_repair = 1;
        spin_unlock(&cache->lock);
 
        kthread_run(relocating_repair_kthread, cache,
 
                        goto out;
                } else if (map->num_stripes == num_conventional) {
                        cache->alloc_offset = last_alloc;
-                       cache->zone_is_active = 1;
+                       set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
                        goto out;
                }
        }
                }
                cache->alloc_offset = alloc_offsets[0];
                cache->zone_capacity = caps[0];
-               cache->zone_is_active = test_bit(0, active);
+               if (test_bit(0, active))
+                       set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
                break;
        case BTRFS_BLOCK_GROUP_DUP:
                if (map->type & BTRFS_BLOCK_GROUP_DATA) {
                                goto out;
                        }
                } else {
-                       cache->zone_is_active = test_bit(0, active);
+                       if (test_bit(0, active))
+                               set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+                                       &cache->runtime_flags);
                }
                cache->alloc_offset = alloc_offsets[0];
                cache->zone_capacity = min(caps[0], caps[1]);
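
One case has no single-helper equivalent: the plain boolean assignment "cache->zone_is_active = test_bit(0, active);" becomes a conditional set_bit() in the hunks above, with no clear_bit() branch. That only works if the flag is known to start clear, which appears to hold here since the block group is still being set up. In general, translating a boolean assignment onto a flag word needs both branches, roughly as follows (placeholder names, not btrfs code):

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_FLAG_ACTIVE     0

struct example_bg {
        unsigned long runtime_flags;
};

static void example_set_active(struct example_bg *bg, bool active)
{
        /* was: bg->active = active;  (single-bit bitfield assignment) */
        if (active)
                set_bit(EXAMPLE_FLAG_ACTIVE, &bg->runtime_flags);
        else
                clear_bit(EXAMPLE_FLAG_ACTIVE, &bg->runtime_flags);
}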
 
        if (!ret) {
                cache->meta_write_pointer = cache->alloc_offset + cache->start;
-               if (cache->zone_is_active) {
+               if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
                        btrfs_get_block_group(cache);
                        spin_lock(&fs_info->zone_active_bgs_lock);
                        list_add_tail(&cache->active_bg_list,
 
        spin_lock(&space_info->lock);
        spin_lock(&block_group->lock);
-       if (block_group->zone_is_active) {
+       if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
                ret = true;
                goto out_unlock;
        }
        }
 
        /* Successfully activated all the zones */
-       block_group->zone_is_active = 1;
+       set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
        space_info->active_total_bytes += block_group->length;
        spin_unlock(&block_group->lock);
        btrfs_try_granting_tickets(fs_info, space_info);
        int i;
 
        spin_lock(&block_group->lock);
-       if (!block_group->zone_is_active) {
+       if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
                spin_unlock(&block_group->lock);
                return 0;
        }
                 * Bail out if someone already deactivated the block group, or
                 * allocated space is left in the block group.
                 */
-               if (!block_group->zone_is_active) {
+               if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+                             &block_group->runtime_flags)) {
                        spin_unlock(&block_group->lock);
                        btrfs_dec_block_group_ro(block_group);
                        return 0;
                }
        }
 
-       block_group->zone_is_active = 0;
+       clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
        block_group->alloc_offset = block_group->zone_capacity;
        block_group->free_space_ctl->free_space = 0;
        btrfs_clear_treelog_bg(block_group);
        ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
 
        spin_lock(&block_group->lock);
-       if (!block_group->zoned_data_reloc_ongoing)
+       if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
                goto out;
 
        /* All relocation extents are written. */
        if (block_group->start + block_group->alloc_offset == logical + length) {
                /* Now, release this block group for further allocations. */
-               block_group->zoned_data_reloc_ongoing = 0;
+               clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+                         &block_group->runtime_flags);
        }
 
 out:
                                            list) {
                                if (!spin_trylock(&bg->lock))
                                        continue;
-                               if (btrfs_zoned_bg_is_full(bg) || bg->zone_is_active) {
+                               if (btrfs_zoned_bg_is_full(bg) ||
+                                   test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+                                            &bg->runtime_flags)) {
                                        spin_unlock(&bg->lock);
                                        continue;
                                }