 void bch2_trans_verify_paths(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned iter;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, iter)
                bch2_btree_path_verify(trans, path);
 }
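
This first hunk sets the pattern for the whole patch: trans_for_each_path() grows a third argument, an unsigned iterator that the macro keeps equal to the path's slot in trans->paths, so loop bodies consume a caller-owned index instead of reading path->idx. A minimal sketch of the caller-side conversion (do_something() is a stand-in, not a real helper):

	/* before: the index had to be recovered from the path itself */
	struct btree_path *path;

	trans_for_each_path(trans, path)
		do_something(trans, path->idx);

	/* after: the index is loop state owned by the caller */
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		do_something(trans, i);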
 
        struct bch_fs *c = trans->c;
        struct btree_path *path;
        unsigned long trace_ip = _RET_IP_;
-       int i, ret = 0;
+       unsigned i;
+       int ret = 0;
 
        if (trans->in_traverse_all)
                return -BCH_ERR_transaction_restart_in_traverse_all;
        trans->restarted = 0;
        trans->last_restarted_ip = 0;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                path->should_be_locked = false;
 
        btree_trans_sort_paths(trans);
 
        BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) - 1);
 
-       trans_for_each_path(trans, path) {
+       trans_for_each_path(trans, path, i) {
                BUG_ON(path->sorted_idx >= trans->nr_sorted);
-               BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
+               BUG_ON(trans->sorted[path->sorted_idx] != i);
        }
 
        for (i = 0; i < trans->nr_sorted; i++) {
        if (trans->srcu_held) {
                struct bch_fs *c = trans->c;
                struct btree_path *path;
+               unsigned i;
 
-               trans_for_each_path(trans, path)
+               trans_for_each_path(trans, path, i)
                        if (path->cached && !btree_node_locked(path, 0))
                                path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
 
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
        u64 now;
 
        bch2_trans_reset_updates(trans);
        trans->mem_top                  = 0;
        trans->journal_entries          = NULL;
 
-       trans_for_each_path(trans, path) {
+       trans_for_each_path(trans, path, i) {
                path->should_be_locked = false;
 
                /*
                 * iterators if we do that
                 */
                if (!path->ref && !path->preserve)
-                       __bch2_path_free(trans, path->idx);
+                       __bch2_path_free(trans, i);
                else
                        path->preserve = false;
        }
 #ifdef CONFIG_BCACHEFS_DEBUG
        struct bch_fs *c = trans->c;
        struct btree_path *path;
+       unsigned i;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->ref)
                        goto leaked;
        return;
 leaked:
        bch_err(c, "btree paths leaked from %s!", trans->fn);
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->ref)
                        printk(KERN_ERR "  btree %s %pS\n",
                               bch2_btree_id_str(path->btree_id),
 
        __bch2_btree_trans_sort_paths(trans);
 }
 
-static inline struct btree_path *
-__trans_next_path(struct btree_trans *trans, unsigned idx)
-{
-       idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, idx);
-       if (idx == BTREE_ITER_MAX)
-               return NULL;
-       EBUG_ON(idx > BTREE_ITER_MAX);
-       EBUG_ON(trans->paths[idx].idx != idx);
-       return &trans->paths[idx];
-}
-
-#define trans_for_each_path(_trans, _path)                             \
-       for (_path = __trans_next_path((_trans), 1);                    \
-            (_path);                                                   \
-            _path = __trans_next_path((_trans), (_path)->idx + 1))
-
 static inline struct btree_path *
 __trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
 {
 #define trans_for_each_path_safe(_trans, _path, _idx)                  \
        trans_for_each_path_safe_from(_trans, _path, _idx, 1)
 
+static inline struct btree_path *
+__trans_next_path(struct btree_trans *trans, unsigned *idx)
+{
+       struct btree_path *path = __trans_next_path_safe(trans, idx);
+       EBUG_ON(path && path->idx != *idx);
+       return path;
+}
+
+#define trans_for_each_path(_trans, _path, _iter)                      \
+       for (_iter = 1;                                                 \
+            (_path = __trans_next_path((_trans), &_iter));             \
+            _iter++)
+
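With the cursor passed by reference, all iteration state now lives in the caller and nothing relies on path->idx tracking a path's position in the array; that decoupling is what allows trans->paths to be reallocated later without fixing up every live path. Hand-expanding the new macro (illustrative only):

	/* trans_for_each_path(trans, path, i) expands to roughly: */
	for (i = 1;				/* slot 0 is skipped, consistent with the bitmap_weight() - 1 assertion above */
	     (path = __trans_next_path(trans, &i));	/* leaves i at the slot path occupies, or returns NULL */
	     i++)
		;
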
 static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
        unsigned idx = path ? path->sorted_idx + 1 : 0;
 __trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
                            unsigned idx)
 {
-       struct btree_path *path = __trans_next_path(trans, idx);
+       struct btree_path *path;
 
-       while (path && !__path_has_node(path, b))
-               path = __trans_next_path(trans, path->idx + 1);
+       while ((path = __trans_next_path(trans, &idx)) &&
+               !__path_has_node(path, b))
+              idx++;
 
        return path;
 }
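
The rewritten loop folds the fetch into the while condition: when the path found at idx does not contain b, the cursor is bumped past it and the next iteration rescans from there. Comparing the two shapes with stand-in names (next() and match() are placeholders):

	/* old: the cursor is re-derived from the path */
	path = next(idx);
	while (path && !match(path))
		path = next(path->idx + 1);

	/* new: the cursor is the loop state */
	while ((path = next(&idx)) && !match(path))
		idx++;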
 
                }
        } else {
                struct btree_path *path2;
+               unsigned i;
 evict:
-               trans_for_each_path(trans, path2)
+               trans_for_each_path(trans, path2, i)
                        if (path2 != path)
                                __bch2_btree_path_unlock(trans, path2);
 
 
 {
        struct btree_path *path;
        struct six_lock_count ret;
+       unsigned i;
 
        memset(&ret, 0, sizeof(ret));
 
        if (IS_ERR_OR_NULL(b))
                return ret;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);
 
                                       struct btree_bkey_cached_common *b)
 {
        struct btree_path *linked;
-       unsigned i;
+       unsigned i, iter;
        int ret;
 
        /*
         * already taken are no longer needed:
         */
 
-       trans_for_each_path(trans, linked) {
+       trans_for_each_path(trans, linked, iter) {
                if (!linked->nodes_locked)
                        continue;
 
                               unsigned new_locks_want,
                               struct get_locks_fail *f)
 {
-       struct btree_path *linked;
-
        if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
                return true;
 
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
-       if (!path->cached && !trans->in_traverse_all)
-               trans_for_each_path(trans, linked)
+       if (!path->cached && !trans->in_traverse_all) {
+               struct btree_path *linked;
+               unsigned i;
+
+               trans_for_each_path(trans, linked, i)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true, NULL);
                        }
+       }
 
        return false;
 }
 void bch2_trans_downgrade(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
        if (trans->restarted)
                return;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                bch2_btree_path_downgrade(trans, path);
 }
 
 int bch2_trans_relock(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
        if (unlikely(trans->restarted))
                return -((int) trans->restarted);
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
 int bch2_trans_relock_notrace(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
        if (unlikely(trans->restarted))
                return -((int) trans->restarted);
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 void bch2_trans_unlock_noassert(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                __bch2_btree_path_unlock(trans, path);
 }
 
 void bch2_trans_unlock(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                __bch2_btree_path_unlock(trans, path);
 }
 
 bool bch2_trans_locked(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->nodes_locked)
                        return true;
        return false;
 void bch2_trans_verify_locks(struct btree_trans *trans)
 {
        struct btree_path *path;
+       unsigned i;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                bch2_btree_path_verify_locks(path);
 }
 
 
                                             enum btree_node_locked_type want)
 {
        struct btree_path *path;
+       unsigned i;
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (&path->l[level].b->c == b &&
                    btree_node_locked_type(path, level) >= want) {
                        six_lock_increment(&b->lock, (enum six_lock_type) want);
 
                                       struct btree *b)
 {
        struct bch_fs *c = trans->c;
-       unsigned level = b->c.level;
+       unsigned i, level = b->c.level;
 
        bch2_btree_node_lock_write_nofail(trans, path, &b->c);
        bch2_btree_node_hash_remove(&c->btree_cache, b);
        six_unlock_write(&b->c.lock);
        mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->l[level].b == b) {
                        btree_node_unlock(trans, path, level);
                        path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
        struct bch_fs *c = as->c;
        struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
        struct btree_path *path;
-       unsigned level = b->c.level;
+       unsigned i, level = b->c.level;
 
        BUG_ON(!list_empty(&b->write_blocked));
        BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
 
        six_unlock_intent(&b->c.lock);
 
-       trans_for_each_path(trans, path)
+       trans_for_each_path(trans, path, i)
                if (path->l[level].b == b) {
                        btree_node_unlock(trans, path, level);
                        path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);