if (ret)
                btrfs_err(fs_info, "kobj add dev failed %d", ret);
 
-       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 
        /*
         * Commit dev_replace state and reserve 1 item for it.
                mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
                return ret;
        }
-       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 
        /*
         * We have to use this loop approach because at this point src_device
 
         * extents that haven't had their dirty pages IO start writeout yet
         * actually get run and error out properly.
         */
-       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 }
 
 static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
        atomic_inc(&root->snapshot_force_cow);
        snapshot_force_cow = true;
 
-       btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_extents(root, U64_MAX, NULL);
 
        ret = btrfs_mksubvol(parent, idmap, name, namelen,
                             root, readonly, inherit);
 
 #include "qgroup.h"
 #include "subpage.h"
 #include "file.h"
+#include "block-group.h"
 
 static struct kmem_cache *btrfs_ordered_extent_cache;
 
 }
 
 /*
- * wait for all the ordered extents in a root.  This is done when balancing
- * space between drives.
+ * Wait for all the ordered extents in a root. If @bg is set, wait only for
+ * ordered extents that intersect the block group's range, otherwise wait for
+ * the whole range.
  */
 u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
-                              const u64 range_start, const u64 range_len)
+                              const struct btrfs_block_group *bg)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
-       const u64 range_end = range_start + range_len;
+       u64 range_start, range_len;
+       u64 range_end;
+
+       if (bg) {
+               range_start = bg->start;
+               range_len = bg->length;
+       } else {
+               range_start = 0;
+               range_len = U64_MAX;
+       }
+       range_end = range_start + range_len;
 
        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        return count;
 }
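Illustrative sketch (not part of the patch): a caller-side view of the reworked per-root helper. The wrapper function name below is hypothetical and only shows how the single @bg argument replaces the old range pair.

	/* Hypothetical wrapper, for illustration of the new signature only. */
	static u64 demo_wait_root_ordered(struct btrfs_root *root,
					  const struct btrfs_block_group *bg)
	{
		/*
		 * NULL waits across the whole range (the old 0/(u64)-1 pair);
		 * a non-NULL @bg limits the wait to
		 * [bg->start, bg->start + bg->length).
		 */
		return btrfs_wait_ordered_extents(root, U64_MAX, bg);
	}
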
 
+/*
+ * Wait for @nr ordered extents that intersect @bg, or the whole range of the
+ * filesystem if @bg is NULL.
+ */
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
-                            const u64 range_start, const u64 range_len)
+                             const struct btrfs_block_group *bg)
 {
        struct btrfs_root *root;
        LIST_HEAD(splice);
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
 
-               done = btrfs_wait_ordered_extents(root, nr,
-                                                 range_start, range_len);
+               done = btrfs_wait_ordered_extents(root, nr, bg);
                btrfs_put_root(root);
 
                spin_lock(&fs_info->ordered_root_lock);
 
 void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list);
 u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
-                              const u64 range_start, const u64 range_len);
+                              const struct btrfs_block_group *bg);
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
-                             const u64 range_start, const u64 range_len);
+                             const struct btrfs_block_group *bg);
 void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state);
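
Usage note (illustrative, not part of the diff; the function below is hypothetical): callers that previously passed the 0/(u64)-1 range now pass NULL for a filesystem-wide wait, and pass the block group directly to scope the wait.

	/* Hypothetical example of the two calling conventions. */
	static void demo_wait_ordered_roots(struct btrfs_fs_info *fs_info,
					    const struct btrfs_block_group *bg)
	{
		if (bg)
			/* Wait only for ordered extents intersecting @bg. */
			btrfs_wait_ordered_roots(fs_info, U64_MAX, bg);
		else
			/* Wait over the whole filesystem range. */
			btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
	}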
 
        ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
        if (ret)
                return ret;
-       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
        trans = btrfs_join_transaction(fs_info->tree_root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        ret = btrfs_start_delalloc_snapshot(root, true);
        if (ret < 0)
                goto out;
-       btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_extents(root, U64_MAX, NULL);
 
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
 
 
        btrfs_wait_block_group_reservations(rc->block_group);
        btrfs_wait_nocow_writers(rc->block_group);
-       btrfs_wait_ordered_roots(fs_info, U64_MAX,
-                                rc->block_group->start,
-                                rc->block_group->length);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
 
        ret = btrfs_zone_finish(rc->block_group);
        WARN_ON(ret && ret != -EAGAIN);
 
 
        btrfs_wait_block_group_reservations(cache);
        btrfs_wait_nocow_writers(cache);
-       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
 
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                 */
                if (sctx->is_dev_replace) {
                        btrfs_wait_nocow_writers(cache);
-                       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
-                                       cache->length);
+                       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
                }
 
                scrub_pause_off(fs_info);
 
                ret = btrfs_start_delalloc_snapshot(root, false);
                if (ret)
                        return ret;
-               btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+               btrfs_wait_ordered_extents(root, U64_MAX, NULL);
        }
 
        for (i = 0; i < sctx->clone_roots_cnt; i++) {
                ret = btrfs_start_delalloc_snapshot(root, false);
                if (ret)
                        return ret;
-               btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+               btrfs_wait_ordered_extents(root, U64_MAX, NULL);
        }
 
        return 0;
 
 skip_async:
                loops++;
                if (wait_ordered && !trans) {
-                       btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+                       btrfs_wait_ordered_roots(fs_info, items, NULL);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
 
                return 0;
        }
 
-       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
 
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
        if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
-               btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+               btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
 }
 
 /*
 
                /* Ensure all writes in this block group finish */
                btrfs_wait_block_group_reservations(block_group);
                /* No need to wait for NOCOW writers. Zoned mode does not allow that */
-               btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
-                                        block_group->length);
+               btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
                /* Wait for extent buffers to be written. */
                if (is_metadata)
                        wait_eb_writebacks(block_group);