www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
bcachefs: btree_insert_entry -> btree_path_idx_t
author: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 10 Dec 2023 21:10:24 +0000 (16:10 -0500)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Mon, 1 Jan 2024 16:47:43 +0000 (11:47 -0500)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update.c
fs/bcachefs/btree_update.h

index 909c215fbb280f40869020e3f346d0003d19fa7c..e90282c85b3c84e29bef345fc641540b549ea909 100644 (file)
@@ -656,7 +656,7 @@ static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, str
                    i->btree_id == b->c.btree_id &&
                    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
                    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
-                       i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;
+                       i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
 
                        if (unlikely(trans->journal_replay_not_finished)) {
                                struct bkey_i *j_k =
@@ -3008,7 +3008,7 @@ void bch2_trans_put(struct btree_trans *trans)
                s->max_mem = max(s->max_mem, trans->mem_max);
 
        trans_for_each_update(trans, i)
-               __btree_path_put(i->path, true);
+               __btree_path_put(trans->paths + i->path, true);
        trans->nr_updates               = 0;
 
        check_btree_paths_leaked(trans);
index 4fb708e91a8fecec1422695c1b659f3de5ff8ac8..99dc0b1a7fc21583ee3b2624ba7a11c865be56bd 100644 (file)
@@ -755,7 +755,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  struct btree_insert_entry *insert_entry)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_cached *ck = (void *) insert_entry->path->l[0].b;
+       struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
        struct bkey_i *insert = insert_entry->k;
        bool kick_reclaim = false;
 
index 9fe22628a94616a9b28c907e3e081fb37f0c3cc0..0e3603d367f9d6cd499d58958c1ffdd70b6f67b5 100644 (file)
@@ -23,7 +23,7 @@ static void verify_update_old_key(struct btree_trans *trans, struct btree_insert
 #ifdef CONFIG_BCACHEFS_DEBUG
        struct bch_fs *c = trans->c;
        struct bkey u;
-       struct bkey_s_c k = bch2_btree_path_peek_slot_exact(i->path, &u);
+       struct bkey_s_c k = bch2_btree_path_peek_slot_exact(trans->paths + i->path, &u);
 
        if (unlikely(trans->journal_replay_not_finished)) {
                struct bkey_i *j_k =
@@ -41,23 +41,23 @@ static void verify_update_old_key(struct btree_trans *trans, struct btree_insert
 #endif
 }
 
-static inline struct btree_path_level *insert_l(struct btree_insert_entry *i)
+static inline struct btree_path_level *insert_l(struct btree_trans *trans, struct btree_insert_entry *i)
 {
-       return i->path->l + i->level;
+       return (trans->paths + i->path)->l + i->level;
 }
 
 static inline bool same_leaf_as_prev(struct btree_trans *trans,
                                     struct btree_insert_entry *i)
 {
        return i != trans->updates &&
-               insert_l(&i[0])->b == insert_l(&i[-1])->b;
+               insert_l(trans, &i[0])->b == insert_l(trans, &i[-1])->b;
 }
 
 static inline bool same_leaf_as_next(struct btree_trans *trans,
                                     struct btree_insert_entry *i)
 {
        return i + 1 < trans->updates + trans->nr_updates &&
-               insert_l(&i[0])->b == insert_l(&i[1])->b;
+               insert_l(trans, &i[0])->b == insert_l(trans, &i[1])->b;
 }
 
 inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
@@ -84,7 +84,7 @@ static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btre
                if (same_leaf_as_prev(trans, i))
                        continue;
 
-               bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b);
+               bch2_btree_node_unlock_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
        }
 
        trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
@@ -99,11 +99,11 @@ static inline int bch2_trans_lock_write(struct btree_trans *trans)
                if (same_leaf_as_prev(trans, i))
                        continue;
 
-               if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c))
+               if (bch2_btree_node_lock_write(trans, trans->paths + i->path, &insert_l(trans, i)->b->c))
                        return trans_lock_write_fail(trans, i);
 
                if (!i->cached)
-                       bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
+                       bch2_btree_node_prep_for_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
        }
 
        trans->write_locked = true;
@@ -115,8 +115,8 @@ static inline void bch2_trans_unlock_write(struct btree_trans *trans)
        if (likely(trans->write_locked)) {
                trans_for_each_update(trans, i)
                        if (!same_leaf_as_prev(trans, i))
-                               bch2_btree_node_unlock_write_inlined(trans, i->path,
-                                                                    insert_l(i)->b);
+                               bch2_btree_node_unlock_write_inlined(trans,
+                                               trans->paths + i->path, insert_l(trans, i)->b);
                trans->write_locked = false;
        }
 }
@@ -307,10 +307,12 @@ inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
 static inline void btree_insert_entry_checks(struct btree_trans *trans,
                                             struct btree_insert_entry *i)
 {
-       BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
-       BUG_ON(i->cached        != i->path->cached);
-       BUG_ON(i->level         != i->path->level);
-       BUG_ON(i->btree_id      != i->path->btree_id);
+       struct btree_path *path = trans->paths + i->path;
+
+       BUG_ON(!bpos_eq(i->k->k.p, path->pos));
+       BUG_ON(i->cached        != path->cached);
+       BUG_ON(i->level         != path->level);
+       BUG_ON(i->btree_id      != path->btree_id);
        EBUG_ON(!i->level &&
                btree_type_has_snapshots(i->btree_id) &&
                !(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
@@ -453,11 +455,9 @@ static int run_one_mem_trigger(struct btree_trans *trans,
                                old, bkey_i_to_s_c(new),
                                BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
        } else {
-               struct bkey             _deleted = KEY(0, 0, 0);
+               struct bkey             _deleted = POS_KEY((trans->paths + i->path)->pos);
                struct bkey_s_c         deleted = (struct bkey_s_c) { &_deleted, NULL };
 
-               _deleted.p = i->path->pos;
-
                ret   = bch2_mark_key(trans, i->btree_id, i->level,
                                deleted, bkey_i_to_s_c(new),
                                BTREE_TRIGGER_INSERT|flags) ?:
@@ -600,7 +600,7 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
                 */
                BUG_ON(i->cached || i->level);
 
-               if (gc_visited(c, gc_pos_btree_node(insert_l(i)->b))) {
+               if (gc_visited(c, gc_pos_btree_node(insert_l(trans, i)->b))) {
                        ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
                        if (ret)
                                break;
@@ -640,8 +640,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 
                u64s += i->k->k.u64s;
                ret = !i->cached
-                       ? btree_key_can_insert(trans, insert_l(i)->b, u64s)
-                       : btree_key_can_insert_cached(trans, flags, i->path, u64s);
+                       ? btree_key_can_insert(trans, insert_l(trans, i)->b, u64s)
+                       : btree_key_can_insert_cached(trans, flags, trans->paths + i->path, u64s);
                if (ret) {
                        *stopped_at = i;
                        return ret;
@@ -746,13 +746,15 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
        }
 
        trans_for_each_update(trans, i) {
+               struct btree_path *path = trans->paths + i->path;
+
                if (!i->cached) {
-                       bch2_btree_insert_key_leaf(trans, i->path, i->k, trans->journal_res.seq);
+                       bch2_btree_insert_key_leaf(trans, path, i->k, trans->journal_res.seq);
                } else if (!i->key_cache_already_flushed)
                        bch2_btree_insert_key_cached(trans, flags, i);
                else {
-                       bch2_btree_key_cache_drop(trans, i->path);
-                       btree_path_set_dirty(i->path, BTREE_ITER_NEED_TRAVERSE);
+                       bch2_btree_key_cache_drop(trans, path);
+                       btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                }
        }
 
@@ -821,7 +823,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
 
                if (!same_leaf_as_next(trans, i)) {
                        if (u64s_delta <= 0) {
-                               ret = bch2_foreground_maybe_merge(trans, i->path,
+                               ret = bch2_foreground_maybe_merge(trans, trans->paths + i->path,
                                                        i->level, flags);
                                if (unlikely(ret))
                                        return ret;
@@ -875,11 +877,14 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
        struct bch_fs *c = trans->c;
 
        switch (ret) {
-       case -BCH_ERR_btree_insert_btree_node_full:
-               ret = bch2_btree_split_leaf(trans, i->path, flags);
+       case -BCH_ERR_btree_insert_btree_node_full: {
+               struct btree_path *path = trans->paths + i->path;
+
+               ret = bch2_btree_split_leaf(trans, path, flags);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-                       trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
+                       trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, path);
                break;
+       }
        case -BCH_ERR_btree_insert_need_mark_replicas:
                ret = drop_locks_do(trans,
                        bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas));
@@ -1015,13 +1020,15 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
                trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);
 
        trans_for_each_update(trans, i) {
-               EBUG_ON(!i->path->should_be_locked);
+               struct btree_path *path = trans->paths + i->path;
+
+               EBUG_ON(!path->should_be_locked);
 
-               ret = bch2_btree_path_upgrade(trans, i->path, i->level + 1);
+               ret = bch2_btree_path_upgrade(trans, path, i->level + 1);
                if (unlikely(ret))
                        goto out;
 
-               EBUG_ON(!btree_node_intent_locked(i->path, i->level));
+               EBUG_ON(!btree_node_intent_locked(path, i->level));
 
                if (i->key_cache_already_flushed)
                        continue;
index 4b96437c087e1090122d32187b82bc16f862cf9b..da7b836cbcf8bd6238470a719a497b5c91fb92b0 100644 (file)
@@ -351,8 +351,8 @@ struct btree_insert_entry {
         * to the size of the key being overwritten in the btree:
         */
        u8                      old_btree_u64s;
+       btree_path_idx_t        path;
        struct bkey_i           *k;
-       struct btree_path       *path;
        /* key being overwritten: */
        struct bkey             old_k;
        const struct bch_val    *old_v;
index 41f0009fb6256e52b06a1f435021baf72d705916..a5a99c3cb32ee58c798aecd69aa028a146b75461 100644 (file)
@@ -24,7 +24,7 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
 }
 
 static int __must_check
-bch2_trans_update_by_path(struct btree_trans *, struct btree_path *,
+bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
                          struct bkey_i *, enum btree_update_flags,
                          unsigned long ip);
 
@@ -266,7 +266,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
 
                bch2_cut_front(new.k->p, update);
 
-               ret = bch2_trans_update_by_path(trans, btree_iter_path(trans, iter), update,
+               ret = bch2_trans_update_by_path(trans, iter->path, update,
                                          BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                          flags, _RET_IP_);
                if (ret)
@@ -339,7 +339,6 @@ err:
 }
 
 static noinline int flush_new_cached_update(struct btree_trans *trans,
-                                           struct btree_path *path,
                                            struct btree_insert_entry *i,
                                            enum btree_update_flags flags,
                                            unsigned long ip)
@@ -348,7 +347,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
        int ret;
 
        btree_path_idx_t path_idx =
-               bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+               bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
                              BTREE_ITER_INTENT, _THIS_IP_);
        ret = bch2_btree_path_traverse(trans, path_idx, 0);
        if (ret)
@@ -370,14 +369,14 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
        i->flags |= BTREE_TRIGGER_NORUN;
 
        btree_path_set_should_be_locked(btree_path);
-       ret = bch2_trans_update_by_path(trans, btree_path, i->k, flags, ip);
+       ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
 out:
        bch2_path_put(trans, path_idx, true);
        return ret;
 }
 
 static int __must_check
-bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
+bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
                          struct bkey_i *k, enum btree_update_flags flags,
                          unsigned long ip)
 {
@@ -385,6 +384,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
        struct btree_insert_entry *i, n;
        int cmp;
 
+       struct btree_path *path = trans->paths + path_idx;
        EBUG_ON(!path->should_be_locked);
        EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
        EBUG_ON(!bpos_eq(k->k.p, path->pos));
@@ -395,7 +395,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                .btree_id       = path->btree_id,
                .level          = path->level,
                .cached         = path->cached,
-               .path           = path,
+               .path           = path_idx,
                .k              = k,
                .ip_allocated   = ip,
        };
@@ -419,7 +419,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
        if (!cmp && i < trans->updates + trans->nr_updates) {
                EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
 
-               bch2_path_put(trans, i->path->idx, true);
+               bch2_path_put(trans, i->path, true);
                i->flags        = n.flags;
                i->cached       = n.cached;
                i->k            = n.k;
@@ -443,7 +443,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                }
        }
 
-       __btree_path_get(i->path, true);
+       __btree_path_get(trans->paths + i->path, true);
 
        /*
         * If a key is present in the key cache, it must also exist in the
@@ -453,7 +453,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
         * work:
         */
        if (path->cached && bkey_deleted(&i->old_k))
-               return flush_new_cached_update(trans, path, i, flags, ip);
+               return flush_new_cached_update(trans, i, flags, ip);
 
        return 0;
 }
@@ -501,7 +501,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
 int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
                                   struct bkey_i *k, enum btree_update_flags flags)
 {
-       struct btree_path *path = trans->paths + (iter->update_path ?: iter->path);
+       btree_path_idx_t path_idx = iter->update_path ?: iter->path;
        int ret;
 
        if (iter->flags & BTREE_ITER_IS_EXTENTS)
@@ -521,6 +521,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
        /*
         * Ensure that updates to cached btrees go to the key cache:
         */
+       struct btree_path *path = trans->paths + path_idx;
        if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
            !path->cached &&
            !path->level &&
@@ -529,10 +530,10 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
                if (ret)
                        return ret;
 
-               path = trans->paths + iter->key_cache_path;
+               path_idx = iter->key_cache_path;
        }
 
-       return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
+       return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
 }
 
 int bch2_btree_insert_clone_trans(struct btree_trans *trans,
index 5c1ef9c848786ef87ffcc4b8c68c6f390fc94baa..fbb83e1aa2b2b5538a46d2dcd3da041a6182dbdc 100644 (file)
@@ -194,7 +194,7 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
 static inline void bch2_trans_reset_updates(struct btree_trans *trans)
 {
        trans_for_each_update(trans, i)
-               bch2_path_put(trans, i->path->idx, true);
+               bch2_path_put(trans, i->path, true);
 
        trans->extra_journal_res        = 0;
        trans->nr_updates               = 0;