/* for raid56, this is a full stripe, without parity */
        unsigned long full_stripe_len;
 
-       unsigned int ro:1;
+       unsigned int ro;
        unsigned int iref:1;
        unsigned int has_caching_ctl:1;
        unsigned int removed:1;
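
The one-line change above is the core of the patch: ro grows from a single bit into a full unsigned int so it can serve as a reference count. Several code paths (scrub, balance, unused block group deletion) may want the same block group read-only at the same time, and with a plain flag whichever path finished first would flip the group back to read-write underneath the others. A minimal standalone sketch of the intended counter semantics (simplified, no locking, invented names; not the kernel code):

#include <assert.h>

struct bg {
        unsigned int ro;        /* 0 = writable, N > 0 = N holders forced RO */
};

static void bg_inc_ro(struct bg *b)
{
        b->ro++;                /* one more holder wants the group read-only */
}

static void bg_dec_ro(struct bg *b)
{
        assert(b->ro);
        --b->ro;                /* writable again only when this reaches 0 */
}

int main(void)
{
        struct bg b = { 0 };

        bg_inc_ro(&b);          /* e.g. scrub */
        bg_inc_ro(&b);          /* e.g. a concurrent balance */
        bg_dec_ro(&b);          /* scrub finishes: group must stay RO */
        assert(b.ro == 1);
        bg_dec_ro(&b);          /* balance finishes: writable again */
        assert(b.ro == 0);
        return 0;
}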
 void btrfs_block_rsv_release(struct btrfs_root *root,
                             struct btrfs_block_rsv *block_rsv,
                             u64 num_bytes);
-int btrfs_set_block_group_ro(struct btrfs_root *root,
+int btrfs_inc_block_group_ro(struct btrfs_root *root,
                             struct btrfs_block_group_cache *cache);
-void btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_dec_block_group_ro(struct btrfs_root *root,
                              struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
 
        return flags;
 }
 
-static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
        struct btrfs_space_info *sinfo = cache->space_info;
        u64 num_bytes;
        u64 min_allocable_bytes;
        int ret = -ENOSPC;
 
-
        /*
         * We need some metadata space and system metadata space for
         * allocating chunks in some corner cases until we force to set
         * it to be readonly.
         */
        spin_lock(&cache->lock);
 
        if (cache->ro) {
+               cache->ro++;
                ret = 0;
                goto out;
        }

        num_bytes = cache->key.offset - cache->reserved - cache->pinned -
                    cache->bytes_super - btrfs_block_group_used(&cache->item);

        if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
            sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
            min_allocable_bytes <= sinfo->total_bytes) {
                sinfo->bytes_readonly += num_bytes;
-               cache->ro = 1;
+               cache->ro++;
                list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
                ret = 0;
        }
        return ret;
 }
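
inc_block_group_ro() now has two outcomes under the locks: if the group is already read-only it simply takes another reference, otherwise it admits the transition only when moving the group's unused bytes (num_bytes) into bytes_readonly still fits under total_bytes with min_allocable_bytes of headroom (the computation elided above keeps a small floor for metadata and system groups unless force is set). A restatement of that admission test as a standalone helper, for illustration only; the names mirror the kernel fields, but the function itself is not part of btrfs:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative restatement of the space check in inc_block_group_ro(). */
static bool can_set_ro(uint64_t bytes_used, uint64_t bytes_reserved,
                       uint64_t bytes_pinned, uint64_t bytes_may_use,
                       uint64_t bytes_readonly, uint64_t num_bytes,
                       uint64_t min_allocable_bytes, uint64_t total_bytes)
{
        return bytes_used + bytes_reserved + bytes_pinned + bytes_may_use +
               bytes_readonly + num_bytes + min_allocable_bytes <=
               total_bytes;
}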
 
-int btrfs_set_block_group_ro(struct btrfs_root *root,
+int btrfs_inc_block_group_ro(struct btrfs_root *root,
                             struct btrfs_block_group_cache *cache)
 
 {
        struct btrfs_trans_handle *trans;
        u64 alloc_flags;
        int ret;
 
-       BUG_ON(cache->ro);
-
 again:
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                        goto out;
        }
 
-       ret = set_block_group_ro(cache, 0);
+       ret = inc_block_group_ro(cache, 0);
        if (!ret)
                goto out;
        alloc_flags = get_alloc_profile(root, cache->space_info->flags);
        ret = do_chunk_alloc(trans, root, alloc_flags,
                             CHUNK_ALLOC_FORCE);
        if (ret < 0)
                goto out;
-       ret = set_block_group_ro(cache, 0);
+       ret = inc_block_group_ro(cache, 0);
 out:
        if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
                alloc_flags = update_block_group_flags(root, cache->flags);
        return free_bytes;
 }
 
-void btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_dec_block_group_ro(struct btrfs_root *root,
                              struct btrfs_block_group_cache *cache)
 {
        struct btrfs_space_info *sinfo = cache->space_info;
        u64 num_bytes;
 
        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);
-       num_bytes = cache->key.offset - cache->reserved - cache->pinned -
-                   cache->bytes_super - btrfs_block_group_used(&cache->item);
-       sinfo->bytes_readonly -= num_bytes;
-       cache->ro = 0;
-       list_del_init(&cache->ro_list);
+       if (!--cache->ro) {
+               num_bytes = cache->key.offset - cache->reserved -
+                           cache->pinned - cache->bytes_super -
+                           btrfs_block_group_used(&cache->item);
+               sinfo->bytes_readonly -= num_bytes;
+               list_del_init(&cache->ro_list);
+       }
        spin_unlock(&cache->lock);
        spin_unlock(&sinfo->lock);
 }
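
The renamed pair spells out the new contract at every call site: each successful btrfs_inc_block_group_ro() must be matched by exactly one btrfs_dec_block_group_ro(), and the accounting above is undone only when the last holder drops its reference. The expected calling pattern, sketched below with error handling trimmed (do_work_while_ro() is an invented name; the two prototypes are the ones declared earlier in this patch):

struct btrfs_root;
struct btrfs_block_group_cache;

int btrfs_inc_block_group_ro(struct btrfs_root *root,
                             struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_root *root,
                              struct btrfs_block_group_cache *cache);

static int do_work_while_ro(struct btrfs_root *root,
                            struct btrfs_block_group_cache *cache)
{
        int ret;

        ret = btrfs_inc_block_group_ro(root, cache);    /* take one RO ref */
        if (ret)
                return ret;                             /* ref was not taken */

        /* ... work that must not race with allocation in this group ... */

        btrfs_dec_block_group_ro(root, cache);          /* drop our RO ref */
        return 0;
}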
 
                set_avail_alloc_bits(root->fs_info, cache->flags);
                if (btrfs_chunk_readonly(root, cache->key.objectid)) {
-                       set_block_group_ro(cache, 1);
+                       inc_block_group_ro(cache, 1);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
                        spin_lock(&info->unused_bgs_lock);
                        /* Should always be true but just in case. */
                list_for_each_entry(cache,
                                &space_info->block_groups[BTRFS_RAID_RAID0],
                                list)
-                       set_block_group_ro(cache, 1);
+                       inc_block_group_ro(cache, 1);
                list_for_each_entry(cache,
                                &space_info->block_groups[BTRFS_RAID_SINGLE],
                                list)
-                       set_block_group_ro(cache, 1);
+                       inc_block_group_ro(cache, 1);
        }
 
        init_global_block_rsv(info);
                spin_unlock(&block_group->lock);
 
                /* We don't want to force the issue, only flip if it's ok. */
-               ret = set_block_group_ro(block_group, 0);
+               ret = inc_block_group_ro(block_group, 0);
                up_write(&space_info->groups_sem);
                if (ret < 0) {
                        ret = 0;
                /* 1 for btrfs_orphan_reserve_metadata() */
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
-                       btrfs_set_block_group_rw(root, block_group);
+                       btrfs_dec_block_group_ro(root, block_group);
                        ret = PTR_ERR(trans);
                        goto next;
                }
                ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
                                  EXTENT_DIRTY, GFP_NOFS);
                if (ret) {
                        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-                       btrfs_set_block_group_rw(root, block_group);
+                       btrfs_dec_block_group_ro(root, block_group);
                        goto end_trans;
                }
                ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
                                  EXTENT_DIRTY, GFP_NOFS);
                if (ret) {
                        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-                       btrfs_set_block_group_rw(root, block_group);
+                       btrfs_dec_block_group_ro(root, block_group);
                        goto end_trans;
                }
                mutex_unlock(&fs_info->unused_bg_unpin_mutex);
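
The remaining hunks all follow the same unwind discipline in btrfs_delete_unused_bgs(): one RO reference is taken up front, and every later failure, from btrfs_start_transaction() through the two clear_extent_bits() calls, hands that reference back before bailing out, since otherwise the group would stay read-only forever. A compact sketch of that shape, using placeholder names rather than kernel APIs:

/* Sketch of the acquire/unwind shape; every name here is a placeholder. */
struct bg { unsigned int ro; };

static int take_ro_ref(struct bg *b)  { b->ro++; return 0; }
static void drop_ro_ref(struct bg *b) { b->ro--; }

static int delete_unused_bg(struct bg *b, int step_ret)
{
        int ret = take_ro_ref(b);

        if (ret < 0)
                return 0;       /* reference never taken: nothing to drop */

        ret = step_ret;         /* transaction start, pin clearing, ... */
        if (ret)
                goto out_rw;

        return 0;               /* success: the group is removed outright */

out_rw:
        drop_ro_ref(b);         /* failure: hand the RO reference back */
        return ret;
}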