return 0;
 }
 
-/*
- * Reduce bio and io accounting for a compressed_bio with its corresponding bio.
- *
- * Return true if there is no pending bio nor io.
- * Return false otherwise.
- */
-static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
-{
-       struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
-       unsigned int bi_size = 0;
-       bool last_io = false;
-       struct bio_vec *bvec;
-       struct bvec_iter_all iter_all;
-
-       /*
-        * At endio time, bi_iter.bi_size doesn't represent the real bio size.
-        * Thus here we have to iterate through all segments to grab correct
-        * bio size.
-        */
-       bio_for_each_segment_all(bvec, bio, iter_all)
-               bi_size += bvec->bv_len;
-
-       if (bio->bi_status)
-               cb->status = bio->bi_status;
-
-       ASSERT(bi_size && bi_size <= cb->compressed_len);
-       last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
-                                       &cb->pending_sectors);
-       /*
-        * Here we must wake up the possible error handler after all other
-        * operations on @cb finished, or we can race with
-        * finish_compressed_bio_*() which may free @cb.
-        */
-       wake_up_var(cb);
-
-       return last_io;
-}
-
 static void finish_compressed_bio_read(struct compressed_bio *cb)
 {
        unsigned int index;
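(The three context lines above close the hunk that deletes the helper; the hunk that follows is in end_compressed_bio_read(), whose local declarations provide the mirror/ret context lines below.)

The deleted dec_and_test_compressed_bio() accounted completion per sector: each endio had to walk its bio's segments to recover the real size (bi_iter.bi_size is no longer meaningful at endio time), subtract that many sectors from pending_sectors, and wake a possible waiter so the submitter's error path could tell when @cb was safe to free. The replacement is plain reference counting: one reference per outstanding bio, plus one held by the submitter for the duration of the submission loop. Both endio handlers then just record bi_status in cb->status and drop their reference, as the next two hunks show. A minimal userspace sketch of the ownership pattern, with C11 atomics standing in for the kernel's refcount_t and hypothetical names throughout (an illustration, not the btrfs code):

#include <stdatomic.h>
#include <stdbool.h>

struct ctx {
        atomic_int pending_ios; /* one ref per in-flight bio, plus the submitter's */
        int status;             /* last error seen; 0 if none */
};

/* Before any bio is allocated: the submitter holds one reference for the
 * whole submission loop. */
void ctx_start(struct ctx *c)
{
        atomic_init(&c->pending_ios, 1);
        c->status = 0;
}

/* Taken for every bio at allocation time, before it can possibly complete. */
void ctx_get(struct ctx *c)
{
        atomic_fetch_add(&c->pending_ios, 1);
}

/* Dropped by each completion handler, and once by the submitter at the end.
 * True for whoever drops the last reference; that caller runs the finish
 * work, so it runs exactly once regardless of completion order. */
bool ctx_put(struct ctx *c)
{
        return atomic_fetch_sub(&c->pending_ios, 1) == 1;
}

Because the submitter's reference is dropped only after the loop ends, the finish work cannot run while bios are still being created, however completions interleave with submission.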
        unsigned int mirror = btrfs_bio(bio)->mirror_num;
        int ret = 0;
 
-       if (!dec_and_test_compressed_bio(cb, bio))
+       if (bio->bi_status)
+               cb->status = bio->bi_status;
+
+       if (!refcount_dec_and_test(&cb->pending_ios))
                goto out;
 
        /*
 {
        struct compressed_bio *cb = bio->bi_private;
 
-       if (dec_and_test_compressed_bio(cb, bio)) {
+       if (bio->bi_status)
+               cb->status = bio->bi_status;
+
+       if (refcount_dec_and_test(&cb->pending_ios)) {
                struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
 
                btrfs_record_physical_zoned(cb->inode, cb->start, bio);
                return ERR_PTR(ret);
        }
        *next_stripe_start = disk_bytenr + geom.len;
-
+       /* The new bio owns a ref on @cb until its endio drops it */
+       refcount_inc(&cb->pending_ios);
        return bio;
 }
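alloc_compressed_bio() takes the reference on behalf of the bio it returns, before the caller can submit it, so pending_ios can never be decremented below the number of live bios. With that invariant, the order in which completions and the submitter's final put happen stops mattering. A self-contained toy (again userspace atomics and made-up names, not btrfs code) showing that the finish work runs exactly once whichever put lands last:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
        atomic_int pending_ios;
        int status;
};

static bool ctx_put(struct ctx *c)
{
        return atomic_fetch_sub(&c->pending_ios, 1) == 1;
}

static void finish(struct ctx *c)
{
        printf("finish runs once, status=%d\n", c->status);
}

/* What each bio's completion does under the new scheme: record the error,
 * drop the bio's reference, run the finish work only if it was the last. */
static void fake_endio(struct ctx *c, int err)
{
        if (err)
                c->status = err;
        if (ctx_put(c))
                finish(c);
}

int main(void)
{
        struct ctx c = { .pending_ios = 3 };    /* submitter + two bios */

        fake_endio(&c, 0);      /* first bio completes cleanly */
        if (ctx_put(&c))        /* submitter's put lands in the middle... */
                finish(&c);     /* ...and is not last, so finish does NOT run here */
        fake_endio(&c, -5);     /* last bio errors out and runs finish itself */
        return 0;
}

The hunks that follow are in btrfs_submit_compressed_write(), where the submitter's side of the pattern is set up.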
 
        struct compressed_bio *cb;
        u64 cur_disk_bytenr = disk_start;
        u64 next_stripe_start;
-       blk_status_t ret;
+       blk_status_t ret = BLK_STS_OK;
        int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
        const bool use_append = btrfs_use_zone_append(inode, disk_start);
        const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                return BLK_STS_RESOURCE;
-       refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
+       /* One ref for the submitter, dropped after the last bio is issued */
+       refcount_set(&cb->pending_ios, 1);
        cb->status = BLK_STS_OK;
        cb->inode = &inode->vfs_inode;
        cb->start = start;
                                &next_stripe_start);
                        if (IS_ERR(bio)) {
                                ret = errno_to_blk_status(PTR_ERR(bio));
-                               bio = NULL;
-                               goto finish_cb;
+                               break;
                        }
                        if (blkcg_css)
                                bio->bi_opf |= REQ_CGROUP_PUNT;
                if (submit) {
                        if (!skip_sum) {
                                ret = btrfs_csum_one_bio(inode, bio, start, true);
-                               if (ret)
-                                       goto finish_cb;
+                               if (ret) {
+                                       bio->bi_status = ret;
+                                       bio_endio(bio);
+                                       break;
+                               }
                        }
 
                        ASSERT(bio->bi_iter.bi_size);
                }
                cond_resched();
        }
-       if (blkcg_css)
-               kthread_associate_blkcg(NULL);
 
-       return 0;
-
-finish_cb:
        if (blkcg_css)
                kthread_associate_blkcg(NULL);
 
-       if (bio) {
-               bio->bi_status = ret;
-               bio_endio(bio);
-       }
-       /* Last byte of @cb is submitted, endio will free @cb */
-       if (cur_disk_bytenr == disk_start + compressed_len)
-               return ret;
-
-       wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
-                          (disk_start + compressed_len - cur_disk_bytenr) >>
-                          fs_info->sectorsize_bits);
-       /*
-        * Even with previous bio ended, we should still have io not yet
-        * submitted, thus need to finish manually.
-        */
-       ASSERT(refcount_read(&cb->pending_sectors));
-       /* Now we are the only one referring @cb, can finish it safely. */
-       finish_compressed_bio_write(cb);
+       if (refcount_dec_and_test(&cb->pending_ios))
+               finish_compressed_bio_write(cb);
        return ret;
 }
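This tail is where the scheme pays off. The deleted finish_cb block had to special-case "everything was submitted" against "we stopped early", wait_var_event() until pending_sectors equaled the never-to-be-submitted remainder, and then finish @cb by hand; success and failure now converge on a single put. The shared shape, sketched against the hypothetical ctx helpers from the first example (prototypes repeated so the snippet stands alone):

#include <stdbool.h>

struct ctx;                     /* from the first sketch */
bool ctx_put(struct ctx *c);    /* drop one reference; true if it was the last */
void finish(struct ctx *c);     /* the completion work */

/* Common tail of both submit paths after this patch (a sketch, not the btrfs
 * code): whether the loop ran to completion or broke out on error, drop the
 * submitter's reference and finish inline only if nothing is still in flight. */
void submit_tail(struct ctx *c)
{
        if (ctx_put(c))
                finish(c);
}

btrfs_submit_compressed_read() below ends the same way.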
 
                goto out;
        }
 
-       refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
+       /* One ref for the submitter, dropped after the last bio is issued */
+       refcount_set(&cb->pending_ios, 1);
        cb->status = BLK_STS_OK;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
                                        REQ_OP_READ, end_compressed_bio_read,
                                        &next_stripe_start);
                        if (IS_ERR(comp_bio)) {
-                               ret = errno_to_blk_status(PTR_ERR(comp_bio));
-                               comp_bio = NULL;
-                               goto finish_cb;
+                               cb->status = errno_to_blk_status(PTR_ERR(comp_bio));
+                               break;
                        }
                }
                /*
                        unsigned int nr_sectors;
 
                        ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
-                       if (ret)
-                               goto finish_cb;
+                       if (ret) {
+                               comp_bio->bi_status = ret;
+                               bio_endio(comp_bio);
+                               break;
+                       }
 
                        nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                                  fs_info->sectorsize);
                        comp_bio = NULL;
                }
        }
+
+       if (refcount_dec_and_test(&cb->pending_ios))
+               finish_compressed_bio_read(cb);
        return;
 
 fail:
        bio->bi_status = ret;
        bio_endio(bio);
        return;
-finish_cb:
-       if (comp_bio) {
-               comp_bio->bi_status = ret;
-               bio_endio(comp_bio);
-       }
-       /* All bytes of @cb is submitted, endio will free @cb */
-       if (cur_disk_byte == disk_bytenr + compressed_len)
-               return;
-
-       wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
-                          (disk_bytenr + compressed_len - cur_disk_byte) >>
-                          fs_info->sectorsize_bits);
-       /*
-        * Even with previous bio ended, we should still have io not yet
-        * submitted, thus need to finish @cb manually.
-        */
-       ASSERT(refcount_read(&cb->pending_sectors));
-       /* Now we are the only one referring @cb, can finish it safely. */
-       finish_compressed_bio_read(cb);
 }
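One asymmetry is worth noting in btrfs_submit_compressed_read(): the fail: label still ends the caller's original bio with the error directly, since that bio never joined @cb's accounting. Errors inside the loop either mark cb->status themselves or end the partially built comp_bio, whose endio records the status and drops its reference; either way the last put sees the error and finish_compressed_bio_read() runs exactly once. If the loop breaks before any compressed bio went out, the submitter's put is necessarily the last, and cleanup happens inline. A toy of that corner case (same userspace stand-ins, hypothetical names):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
        atomic_int pending_ios;
        int status;
};

static bool ctx_put(struct ctx *c)
{
        return atomic_fetch_sub(&c->pending_ios, 1) == 1;
}

int main(void)
{
        /* Only the submitter's reference exists: no bio was ever submitted. */
        struct ctx c = { .pending_ios = 1 };

        c.status = -ENOMEM;     /* pretend the first bio allocation failed */
        if (ctx_put(&c))        /* necessarily the last reference */
                printf("cleanup runs inline, status=%d\n", c.status);
        return 0;
}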
 
 /*