static void bch2_do_invalidates_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
-       struct bch_dev *ca;
        struct btree_trans *trans = bch2_trans_get(c);
-       unsigned i;
        int ret = 0;
 
        ret = bch2_btree_write_buffer_tryflush(trans);
        if (ret)
                goto err;
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                s64 nr_to_invalidate =
                        should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
 
 
 int bch2_fs_freespace_init(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
        bool doing_init = false;
 
         * every mount:
         */
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                if (ca->mi.freespace_initialized)
                        continue;
 
 
 void bch2_recalc_capacity(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        u64 capacity = 0, reserved_sectors = 0, gc_reserve;
        unsigned bucket_size_max = 0;
        unsigned long ra_pages = 0;
-       unsigned i;
 
        lockdep_assert_held(&c->state_lock);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
 
                ra_pages += bdi->ra_pages;
 
        bch2_set_ra_pages(c, ra_pages);
 
-       for_each_rw_member(ca, c, i) {
+       for_each_rw_member(c, ca) {
                u64 dev_reserve = 0;
 
                /*
 
 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        u64 ret = U64_MAX;
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
        return ret;
 }
 
 
 static void bch2_mark_superblocks(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        mutex_lock(&c->sb_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
-       for_each_online_member(ca, c, i)
+       for_each_online_member(c, ca)
                bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
        mutex_unlock(&c->sb_lock);
 }
 
 static void bch2_gc_free(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        genradix_free(&c->reflink_gc_table);
        genradix_free(&c->gc_stripes);
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
                        sizeof(struct bucket_array) +
                        ca->mi.nbuckets * sizeof(struct bucket));
        bool verify = !metadata_only &&
                !c->opts.reconstruct_alloc &&
                (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
-       unsigned i, dev;
+       unsigned i;
        int ret = 0;
 
        percpu_down_write(&c->mark_lock);
                      , ##__VA_ARGS__, dst->_f, src->_f)))              \
                dst->_f = src->_f
 #define copy_dev_field(_err, _f, _msg, ...)                            \
-       copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+       copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
 #define copy_fs_field(_err, _f, _msg, ...)                             \
        copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
 
        for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);
 
-       for_each_member_device(ca, c, dev) {
+       __for_each_member_device(c, ca) {
                struct bch_dev_usage *dst = ca->usage_base;
                struct bch_dev_usage *src = (void *)
                        bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
 
 static int bch2_gc_start(struct bch_fs *c)
 {
-       struct bch_dev *ca = NULL;
-       unsigned i;
-
        BUG_ON(c->usage_gc);
 
        c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
                return -BCH_ERR_ENOMEM_gc_start;
        }
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                BUG_ON(ca->usage_gc);
 
                ca->usage_gc = alloc_percpu(struct bch_dev_usage);
 
 static int bch2_gc_reset(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                free_percpu(ca->usage_gc);
                ca->usage_gc = NULL;
        }
        enum bch_data_type type;
        int ret;
 
-       if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
-               return 1;
-
        old = bch2_alloc_to_v4(k, &old_convert);
        new = *old;
 
 
 static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 {
-       struct btree_trans *trans = bch2_trans_get(c);
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
 
-       for_each_member_device(ca, c, i) {
-               ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-                               POS(ca->dev_idx, ca->mi.first_bucket),
-                               BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
-                               NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
-                       bch2_alloc_write_key(trans, &iter, k, metadata_only));
-
-               if (ret < 0) {
-                       bch_err_fn(c, ret);
+       for_each_member_device(c, ca) {
+               ret = bch2_trans_run(c,
+                       for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
+                                       POS(ca->dev_idx, ca->mi.first_bucket),
+                                       POS(ca->dev_idx, ca->mi.nbuckets - 1),
+                                       BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+                                       NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+                               bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+               if (ret) {
                        percpu_ref_put(&ca->ref);
                        break;
                }
        }
 
-       bch2_trans_put(trans);
-       return ret < 0 ? ret : 0;
+       bch_err_fn(c, ret);
+       return ret;
 }
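(For reference, a minimal sketch of the shape bch2_trans_run() is assumed to have here,
bracketing a single expression with bch2_trans_get()/bch2_trans_put(); this is what lets
the explicit transaction get/put pairs above be dropped. The exact upstream expansion may
differ:

	#define bch2_trans_run(_c, _do)					\
	({								\
		struct btree_trans *trans = bch2_trans_get(_c);		\
		int _ret = (_do);					\
		bch2_trans_put(trans);					\
		_ret;							\
	})

Note also that with for_each_btree_key_upto_commit() bounding iteration at
POS(ca->dev_idx, ca->mi.nbuckets - 1), bch2_alloc_write_key() no longer needs its
positive "past the end" sentinel return, so ret is only ever zero or negative and the
check tightens from "if (ret < 0)" to "if (ret)".)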
 
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
-       struct bch_dev *ca;
-       struct btree_trans *trans = bch2_trans_get(c);
-       struct bucket *g;
-       struct bch_alloc_v4 a_convert;
-       const struct bch_alloc_v4 *a;
-       unsigned i;
-       int ret;
-
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
                                ca->mi.nbuckets * sizeof(struct bucket),
                                GFP_KERNEL|__GFP_ZERO);
                if (!buckets) {
                        percpu_ref_put(&ca->ref);
                        bch_err(c, "error allocating ca->buckets[gc]");
-                       ret = -BCH_ERR_ENOMEM_gc_alloc_start;
-                       goto err;
+                       return -BCH_ERR_ENOMEM_gc_alloc_start;
                }
 
                buckets->first_bucket   = ca->mi.first_bucket;
                rcu_assign_pointer(ca->buckets_gc, buckets);
        }
 
-       ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-                                BTREE_ITER_PREFETCH, k, ({
-               ca = bch_dev_bkey_exists(c, k.k->p.inode);
-               g = gc_bucket(ca, k.k->p.offset);
-
-               a = bch2_alloc_to_v4(k, &a_convert);
-
-               g->gen_valid    = 1;
-               g->gen          = a->gen;
-
-               if (metadata_only &&
-                   (a->data_type == BCH_DATA_user ||
-                    a->data_type == BCH_DATA_cached ||
-                    a->data_type == BCH_DATA_parity)) {
-                       g->data_type            = a->data_type;
-                       g->dirty_sectors        = a->dirty_sectors;
-                       g->cached_sectors       = a->cached_sectors;
-                       g->stripe               = a->stripe;
-                       g->stripe_redundancy    = a->stripe_redundancy;
-               }
+       int ret = bch2_trans_run(c,
+               for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+                                        BTREE_ITER_PREFETCH, k, ({
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+                       struct bucket *g = gc_bucket(ca, k.k->p.offset);
 
-               0;
-       }));
-err:
-       bch2_trans_put(trans);
+                       struct bch_alloc_v4 a_convert;
+                       const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+                       g->gen_valid    = 1;
+                       g->gen          = a->gen;
+
+                       if (metadata_only &&
+                           (a->data_type == BCH_DATA_user ||
+                            a->data_type == BCH_DATA_cached ||
+                            a->data_type == BCH_DATA_parity)) {
+                               g->data_type            = a->data_type;
+                               g->dirty_sectors        = a->dirty_sectors;
+                               g->cached_sectors       = a->cached_sectors;
+                               g->stripe               = a->stripe;
+                               g->stripe_redundancy    = a->stripe_redundancy;
+                       }
+
+                       0;
+               })));
        bch_err_fn(c, ret);
        return ret;
 }
 
 static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                struct bucket_array *buckets = gc_bucket_array(ca);
                struct bucket *g;
 
 
 int bch2_gc_gens(struct bch_fs *c)
 {
-       struct btree_trans *trans;
-       struct bch_dev *ca;
        u64 b, start_time = local_clock();
-       unsigned i;
        int ret;
 
        /*
 
        trace_and_count(c, gc_gens_start, c);
        down_read(&c->gc_lock);
-       trans = bch2_trans_get(c);
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                struct bucket_gens *gens = bucket_gens(ca);
 
                BUG_ON(ca->oldest_gen);
                        ca->oldest_gen[b] = gens->b[b];
        }
 
-       for (i = 0; i < BTREE_ID_NR; i++)
+       for (unsigned i = 0; i < BTREE_ID_NR; i++)
                if (btree_type_has_ptrs(i)) {
                        c->gc_gens_btree = i;
                        c->gc_gens_pos = POS_MIN;
 
-                       ret = for_each_btree_key_commit(trans, iter, i,
-                                       POS_MIN,
-                                       BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
-                                       k,
-                                       NULL, NULL,
-                                       BCH_TRANS_COMMIT_no_enospc,
-                               gc_btree_gens_key(trans, &iter, k));
-                       if (!bch2_err_matches(ret, EROFS))
-                               bch_err_fn(c, ret);
+                       ret = bch2_trans_run(c,
+                               for_each_btree_key_commit(trans, iter, i,
+                                               POS_MIN,
+                                               BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+                                               k,
+                                               NULL, NULL,
+                                               BCH_TRANS_COMMIT_no_enospc,
+                                       gc_btree_gens_key(trans, &iter, k)));
                        if (ret)
                                goto err;
                }
 
-       ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-                       POS_MIN,
-                       BTREE_ITER_PREFETCH,
-                       k,
-                       NULL, NULL,
-                       BCH_TRANS_COMMIT_no_enospc,
-               bch2_alloc_write_oldest_gen(trans, &iter, k));
-       if (!bch2_err_matches(ret, EROFS))
-               bch_err_fn(c, ret);
+       ret = bch2_trans_run(c,
+               for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+                               POS_MIN,
+                               BTREE_ITER_PREFETCH,
+                               k,
+                               NULL, NULL,
+                               BCH_TRANS_COMMIT_no_enospc,
+                       bch2_alloc_write_oldest_gen(trans, &iter, k)));
        if (ret)
                goto err;
 
        bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
        trace_and_count(c, gc_gens_end, c);
 err:
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                kvfree(ca->oldest_gen);
                ca->oldest_gen = NULL;
        }
 
-       bch2_trans_put(trans);
        up_read(&c->gc_lock);
        mutex_unlock(&c->gc_gens_lock);
+       if (!bch2_err_matches(ret, EROFS))
+               bch_err_fn(c, ret);
        return ret;
 }
 
 
 
 void bch2_fs_usage_initialize(struct bch_fs *c)
 {
-       struct bch_fs_usage *usage;
-       struct bch_dev *ca;
-       unsigned i;
-
        percpu_down_write(&c->mark_lock);
-       usage = c->usage_base;
+       struct bch_fs_usage *usage = c->usage_base;
 
-       for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+       for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);
 
-       for (i = 0; i < BCH_REPLICAS_MAX; i++)
+       for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
                usage->reserved += usage->persistent_reserved[i];
 
-       for (i = 0; i < c->replicas.nr; i++) {
+       for (unsigned i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(&c->replicas, i);
 
                fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
        }
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                struct bch_dev_usage dev = bch2_dev_usage_read(ca);
 
                usage->hidden += (dev.d[BCH_DATA_sb].buckets +
 
 int bch2_trans_mark_dev_sbs(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                int ret = bch2_trans_mark_dev_sb(c, ca);
                if (ret) {
                        percpu_ref_put(&ca->ref);
 
                                    struct bch_ioctl_disk_get_idx arg)
 {
        dev_t dev = huge_decode_dev(arg.dev);
-       struct bch_dev *ca;
-       unsigned i;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (!dev)
                return -EINVAL;
 
-       for_each_online_member(ca, c, i)
+       for_each_online_member(c, ca)
                if (ca->dev == dev) {
                        percpu_ref_put(&ca->io_ref);
-                       return i;
+                       return ca->dev_idx;
                }
 
        return -BCH_ERR_ENOENT_dev_idx_not_found;
 
 static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
 {
        struct bch_fs *c = root->d_sb->s_fs_info;
-       struct bch_dev *ca;
-       unsigned i;
        bool first = true;
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                if (!first)
                        seq_putc(seq, ':');
                first = false;
                                 int flags, const char *dev_name, void *data)
 {
        struct bch_fs *c;
-       struct bch_dev *ca;
        struct super_block *sb;
        struct inode *vinode;
        struct bch_opts opts = bch2_opts_empty();
        char **devs;
        struct bch_fs **devs_to_fs = NULL;
-       unsigned i, nr_devs;
+       unsigned nr_devs;
        int ret;
 
        opt_set(opts, read_only, (flags & SB_RDONLY) != 0);
                goto got_sb;
        }
 
-       for (i = 0; i < nr_devs; i++)
+       for (unsigned i = 0; i < nr_devs; i++)
                devs_to_fs[i] = bch2_path_to_fs(devs[i]);
 
        sb = sget(fs_type, bch2_test_super, bch2_noset_super,
 
        sb->s_bdi->ra_pages             = VM_READAHEAD_PAGES;
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                struct block_device *bdev = ca->disk_sb.bdev;
 
                /* XXX: create an anonymous device for multi device filesystems */
 
 
 int bch2_fs_journal_alloc(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                if (ca->journal.nr)
                        continue;
 
 
        struct journal_list jlist;
        struct journal_replay *i, **_i, *prev = NULL;
        struct genradix_iter radix_iter;
-       struct bch_dev *ca;
-       unsigned iter;
        struct printbuf buf = PRINTBUF;
        bool degraded = false, last_write_torn = false;
        u64 seq;
        jlist.last_seq = 0;
        jlist.ret = 0;
 
-       for_each_member_device(ca, c, iter) {
+       for_each_member_device(c, ca) {
                if (!c->opts.fsck &&
                    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
                        continue;
                        continue;
 
                for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
-                       ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
 
                        if (!i->ptrs[ptr].csum_good)
                                bch_err_dev_offset(ca, i->ptrs[ptr].sector,
 {
        closure_type(j, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
-       struct bch_dev *ca;
        struct journal_buf *w = journal_last_unwritten_buf(j);
        struct bch_replicas_padded replicas;
        struct bio *bio;
        struct printbuf journal_debug_buf = PRINTBUF;
-       unsigned i, nr_rw_members = 0;
+       unsigned nr_rw_members = 0;
        int ret;
 
        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
        if (c->opts.nochanges)
                goto no_io;
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                nr_rw_members++;
 
        if (nr_rw_members > 1)
                goto err;
 
        if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
-               for_each_rw_member(ca, c, i) {
+               for_each_rw_member(c, ca) {
                        percpu_ref_get(&ca->io_ref);
 
                        bio = ca->journal.bio;
 
 void bch2_journal_do_discards(struct journal *j)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
-       struct bch_dev *ca;
-       unsigned iter;
 
        mutex_lock(&j->discard_lock);
 
-       for_each_rw_member(ca, c, iter) {
+       for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;
 
                while (should_discard_bucket(j, ja)) {
 static u64 journal_seq_to_flush(struct journal *j)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
-       struct bch_dev *ca;
        u64 seq_to_flush = 0;
-       unsigned iter;
 
        spin_lock(&j->lock);
 
-       for_each_rw_member(ca, c, iter) {
+       for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;
 
 
  */
 unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned dev_idx;
        s64 wait = S64_MAX, fragmented_allowed, fragmented;
-       unsigned i;
 
-       for_each_rw_member(ca, c, dev_idx) {
+       for_each_rw_member(c, ca) {
                struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
                fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
                                       ca->mi.bucket_size) >> 1);
                fragmented = 0;
 
-               for (i = 0; i < BCH_DATA_NR; i++)
+               for (unsigned i = 0; i < BCH_DATA_NR; i++)
                        if (data_type_movable(i))
                                fragmented += usage.d[i].fragmented;
 
 
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct qstr lostfound = QSTR("lost+found");
-       struct bch_dev *ca;
-       unsigned i;
        int ret;
 
        bch_notice(c, "initializing new filesystem");
        set_bit(BCH_FS_may_go_rw, &c->flags);
        set_bit(BCH_FS_fsck_done, &c->flags);
 
-       for (i = 0; i < BTREE_ID_NR; i++)
+       for (unsigned i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                bch2_dev_usage_init(ca);
 
        ret = bch2_fs_journal_alloc(c);
        if (ret)
                goto err;
 
-       for_each_online_member(ca, c, i)
+       for_each_online_member(c, ca)
                ca->new_fs_bucket_idx = 0;
 
        ret = bch2_fs_freespace_init(c);
 
        return 0;
 err:
-       bch_err_fn(ca, ret);
+       bch_err_fn(c, ret);
        return ret;
 }
 
                                           struct jset_entry **end,
                                           u64 journal_seq)
 {
-       struct bch_dev *ca;
-       unsigned i, dev;
-
        percpu_down_read(&c->mark_lock);
 
        if (!journal_seq) {
-               for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+               for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
                        bch2_fs_usage_acc_to_base(c, i);
        } else {
                bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
                u->v            = cpu_to_le64(atomic64_read(&c->key_version));
        }
 
-       for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+       for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);
                u->v            = cpu_to_le64(c->usage_base->persistent_reserved[i]);
        }
 
-       for (i = 0; i < c->replicas.nr; i++) {
+       for (unsigned i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct jset_entry_data_usage *u =
                              "embedded variable length struct");
        }
 
-       for_each_member_device(ca, c, dev) {
+       for_each_member_device(c, ca) {
                unsigned b = sizeof(struct jset_entry_dev_usage) +
                        sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
                struct jset_entry_dev_usage *u =
                                     struct jset_entry_dev_usage, entry);
 
                u->entry.type = BCH_JSET_ENTRY_dev_usage;
-               u->dev = cpu_to_le32(dev);
+               u->dev = cpu_to_le32(ca->dev_idx);
 
-               for (i = 0; i < BCH_DATA_NR; i++) {
+               for (unsigned i = 0; i < BCH_DATA_NR; i++) {
                        u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
                        u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
                        u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
 
        percpu_up_read(&c->mark_lock);
 
-       for (i = 0; i < 2; i++) {
+       for (unsigned i = 0; i < 2; i++) {
                struct jset_entry_clock *clock =
                        container_of(jset_entry_init(end, sizeof(*clock)),
                                     struct jset_entry_clock, entry);
 
 #define for_each_member_device_rcu(ca, c, iter, mask)                  \
        for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)
 
-static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
 {
-       struct bch_dev *ca;
+       unsigned idx = ca ? ca->dev_idx + 1 : 0;
+
+       if (ca)
+               percpu_ref_put(&ca->ref);
 
        rcu_read_lock();
-       if ((ca = __bch2_next_dev(c, iter, NULL)))
+       if ((ca = __bch2_next_dev(c, &idx, NULL)))
                percpu_ref_get(&ca->ref);
        rcu_read_unlock();
 
 /*
  * If you break early, you must drop your ref on the current device
  */
-#define for_each_member_device(ca, c, iter)                            \
-       for ((iter) = 0;                                                \
-            (ca = bch2_get_next_dev(c, &(iter)));                      \
-            percpu_ref_put(&ca->ref), (iter)++)
+#define __for_each_member_device(_c, _ca)                              \
+       for (;  (_ca = bch2_get_next_dev(_c, _ca));)
+
+#define for_each_member_device(_c, _ca)                                        \
+       for (struct bch_dev *_ca = NULL;                                \
+            (_ca = bch2_get_next_dev(_c, _ca));)
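(Usage sketch for the new form: the macro now declares the loop variable itself, and
bch2_get_next_dev() drops the previous device's ref before taking the next one, so the
only manual ref management left is on early exit, per the comment above. do_something()
is a hypothetical helper:

	for_each_member_device(c, ca) {
		int ret = do_something(c, ca);	/* hypothetical helper */
		if (ret) {
			/* breaking early: drop the ref the iterator is holding */
			percpu_ref_put(&ca->ref);
			break;
		}
	}
)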
 
 static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
-                                                     unsigned *iter,
-                                                     int state_mask)
+                                                      struct bch_dev *ca,
+                                                      unsigned state_mask)
 {
-       struct bch_dev *ca;
+       unsigned idx = ca ? ca->dev_idx + 1 : 0;
+
+       if (ca)
+               percpu_ref_put(&ca->io_ref);
 
        rcu_read_lock();
-       while ((ca = __bch2_next_dev(c, iter, NULL)) &&
+       while ((ca = __bch2_next_dev(c, &idx, NULL)) &&
               (!((1 << ca->mi.state) & state_mask) ||
                !percpu_ref_tryget(&ca->io_ref)))
-               (*iter)++;
+               idx++;
        rcu_read_unlock();
 
        return ca;
 }
 
-#define __for_each_online_member(ca, c, iter, state_mask)              \
-       for ((iter) = 0;                                                \
-            (ca = bch2_get_next_online_dev(c, &(iter), state_mask));   \
-            percpu_ref_put(&ca->io_ref), (iter)++)
+#define __for_each_online_member(_c, _ca, state_mask)                  \
+       for (struct bch_dev *_ca = NULL;                                \
+            (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)
 
-#define for_each_online_member(ca, c, iter)                            \
-       __for_each_online_member(ca, c, iter, ~0)
+#define for_each_online_member(c, ca)                                  \
+       __for_each_online_member(c, ca, ~0)
 
-#define for_each_rw_member(ca, c, iter)                                        \
-       __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
+#define for_each_rw_member(c, ca)                                      \
+       __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))
 
-#define for_each_readable_member(ca, c, iter)                          \
-       __for_each_online_member(ca, c, iter,                           \
-               (1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
+#define for_each_readable_member(c, ca)                                \
+       __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
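(The online variants hold ca->io_ref rather than ca->ref, so an early exit must put
io_ref instead, as bch2_ioctl_disk_get_idx() above does. A minimal sketch; do_io() is a
hypothetical helper:

	for_each_rw_member(c, ca)
		if (do_io(c, ca)) {			/* hypothetical helper */
			percpu_ref_put(&ca->io_ref);	/* early exit: drop the io ref */
			break;
		}
)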
 
 /*
  * If a key exists that references a device, the device won't be going away and
 static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
 {
        struct bch_devs_mask devs;
-       struct bch_dev *ca;
-       unsigned i;
 
        memset(&devs, 0, sizeof(devs));
-       for_each_online_member(ca, c, i)
+       for_each_online_member(c, ca)
                __set_bit(ca->dev_idx, devs.d);
        return devs;
 }
 
 
        if (sb->fs_sb) {
                struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
-               struct bch_dev *ca;
-               unsigned i;
 
                lockdep_assert_held(&c->sb_lock);
 
                /* XXX: we're not checking that offline devices have enough space */
 
-               for_each_online_member(ca, c, i) {
+               for_each_online_member(c, ca) {
                        struct bch_sb_handle *dev_sb = &ca->disk_sb;
 
                        if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
 static void bch2_sb_update(struct bch_fs *c)
 {
        struct bch_sb *src = c->disk_sb.sb;
-       struct bch_dev *ca;
-       unsigned i;
 
        lockdep_assert_held(&c->sb_lock);
 
                le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent,
                                    sizeof(c->sb.errors_silent) * 8);
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);
                ca->mi = bch2_mi_to_cpu(&m);
        }
 int bch2_write_super(struct bch_fs *c)
 {
        struct closure *cl = &c->sb_write;
-       struct bch_dev *ca;
        struct printbuf err = PRINTBUF;
-       unsigned i, sb = 0, nr_wrote;
+       unsigned sb = 0, nr_wrote;
        struct bch_devs_mask sb_written;
        bool wrote, can_mount_without_written, can_mount_with_written;
        unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
        bch2_sb_errors_from_cpu(c);
        bch2_sb_downgrade_update(c);
 
-       for_each_online_member(ca, c, i)
+       for_each_online_member(c, ca)
                bch2_sb_from_fs(c, ca);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                printbuf_reset(&err);
 
                ret = bch2_sb_validate(&ca->disk_sb, &err, WRITE);
                return -BCH_ERR_sb_not_downgraded;
        }
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                __set_bit(ca->dev_idx, sb_written.d);
                ca->sb_write_error = 0;
        }
 
-       for_each_online_member(ca, c, i)
+       for_each_online_member(c, ca)
                read_back_super(c, ca);
        closure_sync(cl);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                if (ca->sb_write_error)
                        continue;
 
 
        do {
                wrote = false;
-               for_each_online_member(ca, c, i)
+               for_each_online_member(c, ca)
                        if (!ca->sb_write_error &&
                            sb < ca->disk_sb.sb->layout.nr_superblocks) {
                                write_one_super(c, ca, sb);
                sb++;
        } while (wrote);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                if (ca->sb_write_error)
                        __clear_bit(ca->dev_idx, sb_written.d);
                else
        can_mount_with_written =
                bch2_have_enough_devs(c, sb_written, degraded_flags, false);
 
-       for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
+       for (unsigned i = 0; i < ARRAY_SIZE(sb_written.d); i++)
                sb_written.d[i] = ~sb_written.d[i];
 
        can_mount_without_written =
 
 
 static void __bch2_fs_read_only(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i, clean_passes = 0;
+       unsigned clean_passes = 0;
        u64 seq = 0;
 
        bch2_fs_ec_stop(c);
        /*
         * After stopping journal:
         */
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                bch2_dev_allocator_remove(c, ca);
 }
 
 
 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret;
 
        if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
         */
        set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
 #ifndef BCH_WRITE_REF_DEBUG
        percpu_ref_reinit(&c->writes);
 #else
-       for (i = 0; i < BCH_WRITE_REF_NR; i++) {
+       for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
                BUG_ON(atomic_long_read(&c->writes[i]));
                atomic_long_inc(&c->writes[i]);
        }
 
 void __bch2_fs_stop(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        bch_verbose(c, "shutting down");
 
        set_bit(BCH_FS_stopping, &c->flags);
        bch2_fs_read_only(c);
        up_write(&c->state_lock);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                if (ca->kobj.state_in_sysfs &&
                    ca->disk_sb.bdev)
                        sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
        /* btree prefetch might have kicked off reads in the background: */
        bch2_btree_flush_all_reads(c);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                cancel_work_sync(&ca->io_error_work);
 
        cancel_work_sync(&c->read_only_work);
 
 static int bch2_fs_online(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
 
        lockdep_assert_held(&bch_fs_list_lock);
 
        down_write(&c->state_lock);
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                ret = bch2_dev_sysfs_online(c, ca);
                if (ret) {
                        bch_err(c, "error creating sysfs objects");
 
 int bch2_fs_start(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        time64_t now = ktime_get_real_seconds();
-       unsigned i;
        int ret;
 
        print_mount_opts(c);
                goto err;
        }
 
-       for_each_online_member(ca, c, i)
-               bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
+       for_each_online_member(c, ca)
+               bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
 
        mutex_unlock(&c->sb_lock);
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
                            enum bch_member_state new_state, int flags)
 {
        struct bch_devs_mask new_online_devs;
-       struct bch_dev *ca2;
-       int i, nr_rw = 0, required;
+       int nr_rw = 0, required;
 
        lockdep_assert_held(&c->state_lock);
 
                        return true;
 
                /* do we have enough devices to write to?  */
-               for_each_member_device(ca2, c, i)
+               for_each_member_device(c, ca2)
                        if (ca2 != ca)
                                nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;