#include <linux/sort.h>
#include <linux/stat.h>
-#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
-{
-	if (btree >= BTREE_ID_NR_MAX)
-		return;
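+/* Flag a btree as having lost data and schedule the recovery passes needed to repair it */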
+int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
+{
	u64 b = BIT_ULL(btree);
+	int ret = 0;
+
+	mutex_lock(&c->sb_lock);
+	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
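+	/* Log and persist the lost-data flag only the first time this btree is flagged: */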
	if (!(c->sb.btrees_lost_data & b)) {
-		bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));
+		struct printbuf buf = PRINTBUF;
+		bch2_btree_id_to_text(&buf, btree);
+		bch_err(c, "flagging btree %s lost data", buf.buf);
+		printbuf_exit(&buf);
+		ext->btrees_lost_data |= cpu_to_le64(b);
+	}
-		mutex_lock(&c->sb_lock);
-		bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
-		bch2_write_super(c);
-		mutex_unlock(&c->sb_lock);
+	/* Once we have runtime self healing for topology errors we won't need this: */
+	ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_topology) ?: ret;
+
+	/* Btree node accounting will be off: */
+	__set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent);
+	ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret;
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+	/*
+	 * These are much more minor, and don't need to be corrected right away,
+	 * but in debug mode we want the next fsck run to be clean:
+	 */
+	ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_lrus) ?: ret;
+	ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_backpointers_to_extents) ?: ret;
+#endif
+
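+	/* Schedule additional repair passes specific to the btree that lost data: */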
+	switch (btree) {
+	case BTREE_ID_alloc:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret;
+
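+		/* Rebuilding alloc info will trip these fsck errors; they're expected, so silence them: */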
+		__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
+		__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
+		__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
+		__set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
+		__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
+		__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
+		goto out;
+	case BTREE_ID_backpointers:
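+		/* Check backpointer keys themselves, then cross-check them against the extents btree: */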
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_btree_backpointers) ?: ret;
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_extents_to_backpointers) ?: ret;
+		goto out;
+	case BTREE_ID_need_discard:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret;
+		goto out;
+	case BTREE_ID_freespace:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret;
+		goto out;
+	case BTREE_ID_bucket_gens:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret;
+		goto out;
+	case BTREE_ID_lru:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret;
+		goto out;
+	case BTREE_ID_accounting:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret;
+		goto out;
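+	/* Any other btree: scan devices for btree nodes so topology repair can recover what's missing: */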
+	default:
+		ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_scan_for_btree_nodes) ?: ret;
+		goto out;
	}
+out:
+	bch2_write_super(c);
+	mutex_unlock(&c->sb_lock);
+
+	return ret;
+}
+
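+/* Drop a btree entirely: mark its root as not alive and shoot down any keys for it still held in the journal */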
+static void kill_btree(struct bch_fs *c, enum btree_id btree)
+{
+	bch2_btree_id_root(c, btree)->alive = false;
+	bch2_shoot_down_journal_keys(c, btree, 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
}
/* for -o reconstruct_alloc: */