From 43c7ede0095d7020ca03113b2fde84b00dd5cd49 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Wed, 8 Nov 2023 22:04:29 -0500
Subject: [PATCH] bcachefs: Kill BTREE_UPDATE_PREJOURNAL

With the previous patch that reworks BTREE_INSERT_JOURNAL_REPLAY, we can
now switch the btree write buffer to use it for flushing.

This has the advantage that transaction commits don't need to take a
journal reservation at all.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/bkey_methods.h       |  2 --
 fs/bcachefs/btree_trans_commit.c |  7 +------
 fs/bcachefs/btree_types.h        |  1 -
 fs/bcachefs/btree_update.c       | 23 -----------------------
 fs/bcachefs/btree_write_buffer.c | 14 ++++++++++----
 5 files changed, 11 insertions(+), 36 deletions(-)

diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 3a370b7087ac..912adadfb4dd 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -93,7 +93,6 @@ static inline int bch2_mark_key(struct btree_trans *trans,
 enum btree_update_flags {
 	__BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
 	__BTREE_UPDATE_NOJOURNAL,
-	__BTREE_UPDATE_PREJOURNAL,
 	__BTREE_UPDATE_KEY_CACHE_RECLAIM,
 
 	__BTREE_TRIGGER_NORUN,		/* Don't run triggers at all */
@@ -108,7 +107,6 @@ enum btree_update_flags {
 
 #define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
 #define BTREE_UPDATE_NOJOURNAL		(1U << __BTREE_UPDATE_NOJOURNAL)
-#define BTREE_UPDATE_PREJOURNAL		(1U << __BTREE_UPDATE_PREJOURNAL)
 #define BTREE_UPDATE_KEY_CACHE_RECLAIM	(1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
 
 #define BTREE_TRIGGER_NORUN		(1U << __BTREE_TRIGGER_NORUN)
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 6c5510c4a2c4..403b7310d21a 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -778,12 +778,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 
 	trans_for_each_update(trans, i) {
 		if (!i->cached) {
-			u64 seq = trans->journal_res.seq;
-
-			if (i->flags & BTREE_UPDATE_PREJOURNAL)
-				seq = i->seq;
-
-			bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
+			bch2_btree_insert_key_leaf(trans, i->path, i->k, trans->journal_res.seq);
 		} else if (!i->key_cache_already_flushed)
 			bch2_btree_insert_key_cached(trans, flags, i);
 		else {
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 60453ba86c4b..b667da4e8403 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -356,7 +356,6 @@ struct btree_insert_entry {
 	u8 old_btree_u64s;
 	struct bkey_i *k;
 	struct btree_path *path;
-	u64 seq;
 	/* key being overwritten: */
 	struct bkey old_k;
 	const struct bch_val *old_v;
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 2fd3c8cc6f51..82f85e3a5787 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -381,21 +381,12 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i, n;
-	u64 seq = 0;
 	int cmp;
 
 	EBUG_ON(!path->should_be_locked);
 	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
 	EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
-	/*
-	 * The transaction journal res hasn't been allocated at this point.
-	 * That occurs at commit time. Reuse the seq field to pass in the seq
-	 * of a prejournaled key.
-	 */
-	if (flags & BTREE_UPDATE_PREJOURNAL)
-		seq = trans->journal_res.seq;
-
 	n = (struct btree_insert_entry) {
 		.flags		= flags,
 		.bkey_type	= __btree_node_type(path->level, path->btree_id),
@@ -404,7 +395,6 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 		.cached		= path->cached,
 		.path		= path,
 		.k		= k,
-		.seq		= seq,
 		.ip_allocated	= ip,
 	};
 
@@ -432,7 +422,6 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 		i->cached	= n.cached;
 		i->k		= n.k;
 		i->path		= n.path;
-		i->seq		= n.seq;
 		i->ip_allocated	= n.ip_allocated;
 	} else {
 		array_insert_item(trans->updates, trans->nr_updates,
@@ -543,18 +532,6 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 	return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
 }
 
-/*
- * Add a transaction update for a key that has already been journaled.
- */
-int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
-				       struct btree_iter *iter, struct bkey_i *k,
-				       enum btree_update_flags flags)
-{
-	trans->journal_res.seq = seq;
-	return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
-						 BTREE_UPDATE_PREJOURNAL);
-}
-
 static noinline int bch2_btree_insert_clone_trans(struct btree_trans *trans,
 						  enum btree_id btree,
 						  struct bkey_i *k)
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 9609eb18f38d..7f3147e064a5 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -78,12 +78,15 @@ static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
 	}
 	return 0;
 trans_commit:
-	return bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k,
-				     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+	trans->journal_res.seq = wb->journal_seq;
+
+	return bch2_trans_update(trans, iter, &wb->k,
+				 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
 		bch2_trans_commit(trans, NULL, NULL,
 				  commit_flags|
 				  BTREE_INSERT_NOCHECK_RW|
 				  BTREE_INSERT_NOFAIL|
+				  BTREE_INSERT_JOURNAL_REPLAY|
 				  BTREE_INSERT_JOURNAL_RECLAIM);
 }
 
@@ -127,9 +130,11 @@ btree_write_buffered_insert(struct btree_trans *trans,
 	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
 			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);
 
+	trans->journal_res.seq = wb->journal_seq;
+
 	ret = bch2_btree_iter_traverse(&iter) ?:
-		bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k,
-				      BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+		bch2_trans_update(trans, &iter, &wb->k,
+				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
@@ -262,6 +267,7 @@ slowpath:
 		ret = commit_do(trans, NULL, NULL,
 				commit_flags|
 				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_JOURNAL_REPLAY|
 				BTREE_INSERT_JOURNAL_RECLAIM,
 				btree_write_buffered_insert(trans, i));
 		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
-- 
2.49.0
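For reference, a minimal sketch (not part of the patch) of what a caller that previously went through bch2_trans_update_seq() looks like after this change, modelled on the write-buffer hunks above. The function name flush_prejournaled_key() and its btree/k/journal_seq parameters are hypothetical placeholders; the calls and flags are the ones used in the diff:

/*
 * Hypothetical helper, modelled on btree_write_buffered_insert() above:
 * flush a key that already has a journal sequence number, without taking
 * a new journal reservation.
 */
static int flush_prejournaled_key(struct btree_trans *trans,
				  enum btree_id btree, struct bkey_i *k,
				  u64 journal_seq)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, bkey_start_pos(&k->k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	/*
	 * Hand the key's existing journal seq straight to the transaction
	 * (previously smuggled in via bch2_trans_update_seq()):
	 */
	trans->journal_res.seq = journal_seq;

	ret = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_JOURNAL_REPLAY|
				  BTREE_INSERT_JOURNAL_RECLAIM);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

Setting trans->journal_res.seq directly and committing with BTREE_INSERT_JOURNAL_REPLAY makes the commit reuse the journal entry the key was already written to, which is why the PREJOURNAL flag and the insert entry's seq field are no longer needed.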