struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
if (!ca)
- return -EIO;
+ return -BCH_ERR_trigger_alloc;
struct bch_alloc_v4 old_a_convert;
const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
invalid_bucket:
bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
(bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
- ret = -EIO;
+ ret = -BCH_ERR_trigger_alloc;
goto err;
}
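
The filesystem's private error codes still resolve to their errno roots at the boundary, so switching -EIO to -BCH_ERR_trigger_alloc loses nothing for userspace while preserving the failure site internally. A minimal sketch of that behavior, assuming the bch2_err_matches()/bch2_err_class() helpers from fs/bcachefs/errcode.h:

	int ret = -BCH_ERR_trigger_alloc;

	/* trigger_alloc is registered in the EIO class, so this matches: */
	if (bch2_err_matches(ret, EIO))
		/* and this collapses it back to plain -EIO for the caller: */
		ret = bch2_err_class(ret);
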
void bch2_open_bucket_write_error(struct bch_fs *c,
struct open_buckets *obs,
- unsigned dev)
+ unsigned dev, int err)
{
struct open_bucket *ob;
unsigned i;
open_bucket_for_each(c, obs, ob, i)
if (ob->dev == dev && ob->ec)
- bch2_ec_bucket_cancel(c, ob);
+ bch2_ec_bucket_cancel(c, ob, err);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
}
void bch2_open_bucket_write_error(struct bch_fs *,
- struct open_buckets *, unsigned);
+ struct open_buckets *, unsigned, int);
void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
prt_str(&buf, ")");
WARN_RATELIMIT(1, "%s", buf.buf);
printbuf_exit(&buf);
- return -EIO;
+ return -BCH_ERR_recompute_checksum;
}
for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
size_t src_len = src->bi_iter.bi_size;
size_t dst_len = crc.uncompressed_size << 9;
void *workspace;
- int ret;
+ int ret = 0, ret2;
enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
mempool_t *workspace_pool = &c->compress_workspace[opt];
else
ret = -BCH_ERR_compression_workspace_not_initialized;
if (ret)
- goto out;
+ goto err;
}
src_data = bio_map_or_bounce(c, src, READ);
switch (crc.compression_type) {
case BCH_COMPRESSION_TYPE_lz4_old:
case BCH_COMPRESSION_TYPE_lz4:
- ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
- src_len, dst_len, dst_len);
- if (ret != dst_len)
- goto err;
+ ret2 = LZ4_decompress_safe_partial(src_data.b, dst_data,
+ src_len, dst_len, dst_len);
+ if (ret2 != dst_len)
+ ret = -BCH_ERR_decompress_lz4;
break;
case BCH_COMPRESSION_TYPE_gzip: {
z_stream strm = {
zlib_set_workspace(&strm, workspace);
zlib_inflateInit2(&strm, -MAX_WBITS);
- ret = zlib_inflate(&strm, Z_FINISH);
+ ret2 = zlib_inflate(&strm, Z_FINISH);
mempool_free(workspace, workspace_pool);
- if (ret != Z_STREAM_END)
- goto err;
+ if (ret2 != Z_STREAM_END)
+ ret = -BCH_ERR_decompress_gzip;
break;
}
case BCH_COMPRESSION_TYPE_zstd: {
ZSTD_DCtx *ctx;
size_t real_src_len = le32_to_cpup(src_data.b);
- if (real_src_len > src_len - 4)
+ if (real_src_len > src_len - 4) {
+ ret = -BCH_ERR_decompress_zstd_src_len_bad;
goto err;
+ }
workspace = mempool_alloc(workspace_pool, GFP_NOFS);
ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
- ret = zstd_decompress_dctx(ctx,
+ ret2 = zstd_decompress_dctx(ctx,
dst_data, dst_len,
src_data.b + 4, real_src_len);
mempool_free(workspace, workspace_pool);
- if (ret != dst_len)
- goto err;
+ if (ret2 != dst_len)
+ ret = -BCH_ERR_decompress_zstd;
break;
}
default:
BUG();
}
- ret = 0;
+err:
fsck_err:
-out:
bio_unmap_or_unbounce(c, src_data);
return ret;
-err:
- ret = -EIO;
- goto out;
}
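
Splitting the single -EIO into per-algorithm codes lets callers match the whole decompression family through its shared parent class. A sketch of the intended caller-side test, assuming the BCH_ERR_decompress class added below in errcode.h and the bch2_err_str() helper:

	int ret = __bio_uncompress(c, src, dst_data, crc);

	/* decompress_lz4, decompress_gzip, decompress_zstd, ... all match here */
	if (bch2_err_matches(ret, BCH_ERR_decompress))
		pr_err("decompress failed: %s", bch2_err_str(ret));
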
int bch2_bio_uncompress_inplace(struct bch_write_op *op,
BUG_ON(!bio->bi_vcnt);
BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);
- if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
- crc->compressed_size << 9 > c->opts.encoded_extent_max) {
- bch2_write_op_error(op, op->pos.offset, "extent too big to decompress");
- return -EIO;
+ if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max) {
+ bch2_write_op_error(op, op->pos.offset,
+ "extent too big to decompress (%u > %u)",
+ crc->uncompressed_size << 9, c->opts.encoded_extent_max);
+ return -BCH_ERR_decompress_exceeded_max_encoded_extent;
}
data = __bounce_alloc(c, dst_len, WRITE);
- if (__bio_uncompress(c, bio, data.b, *crc)) {
- if (!c->opts.no_data_io)
- bch2_write_op_error(op, op->pos.offset, "decompression error");
- ret = -EIO;
+ ret = __bio_uncompress(c, bio, data.b, *crc);
+
+ if (c->opts.no_data_io)
+ ret = 0;
+
+ if (ret) {
+ bch2_write_op_error(op, op->pos.offset, "%s", bch2_err_str(ret));
goto err;
}
if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
crc.compressed_size << 9 > c->opts.encoded_extent_max)
- return -EIO;
+ return -BCH_ERR_decompress_exceeded_max_encoded_extent;
dst_data = dst_len == dst_iter.bi_size
? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
printbuf_exit(&buf);
bch2_fatal_error(c);
- ret = -EIO;
+ ret = -BCH_ERR_invalid_bkey;
goto out;
}
bch2_fs_inconsistent(c, "%s", buf.buf);
printbuf_exit(&buf);
- return -EIO;
+ return -BCH_ERR_erasure_coding_found_btree_node;
}
k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed);
struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
if (!ca)
- return -EIO;
+ return -BCH_ERR_ENOENT_dev_not_found;
struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
{
struct btree_trans *trans = bch2_trans_get(c);
struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
- unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
- int ret = 0;
+ unsigned nr_data = v->nr_blocks - v->nr_redundant;
- ret = bch2_btree_write_buffer_flush_sync(trans);
+ int ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
goto err;
- for (i = 0; i < nr_data; i++) {
+ for (unsigned i = 0; i < nr_data; i++) {
ret = ec_stripe_update_bucket(trans, s, i);
if (ret)
break;
}
err:
bch2_trans_put(trans);
-
return ret;
}
ec_stripe_new_set_pending(c, h);
}
-void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
+void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob, int err)
{
struct ec_stripe_new *s = ob->ec;
- s->err = -EIO;
+ s->err = err;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
-void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);
+void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *, int);
int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
x(ENOENT, ENOENT_snapshot_tree) \
x(ENOENT, ENOENT_dirent_doesnt_match_inode) \
x(ENOENT, ENOENT_dev_not_found) \
+ x(ENOENT, ENOENT_dev_bucket_not_found) \
x(ENOENT, ENOENT_dev_idx_not_found) \
x(ENOENT, ENOENT_inode_no_backpointer) \
x(ENOENT, ENOENT_no_snapshot_tree_subvol) \
x(EINVAL, no_resize_with_buckets_nouse) \
x(EINVAL, inode_unpack_error) \
x(EINVAL, varint_decode_error) \
+ x(EINVAL, erasure_coding_found_btree_node) \
x(EOPNOTSUPP, may_not_use_incompat_feature) \
x(EROFS, erofs_trans_commit) \
x(EROFS, erofs_no_writes) \
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
x(EIO, journal_shutdown) \
x(EIO, journal_flush_err) \
+ x(EIO, journal_write_err) \
x(EIO, btree_node_read_err) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_cached) \
x(EIO, sb_not_downgraded) \
x(EIO, btree_node_read_validate_error) \
x(EIO, btree_need_topology_repair) \
x(EIO, bucket_ref_update) \
+ x(EIO, trigger_alloc) \
x(EIO, trigger_pointer) \
x(EIO, trigger_stripe_pointer) \
x(EIO, metadata_bucket_inconsistency) \
x(EIO, EIO_fault_injected) \
x(EIO, ec_block_read) \
x(EIO, ec_block_write) \
- x(EIO, data_read) \
+ x(EIO, recompute_checksum) \
+ x(EIO, decompress) \
+ x(BCH_ERR_decompress, decompress_exceeded_max_encoded_extent) \
+ x(BCH_ERR_decompress, decompress_lz4) \
+ x(BCH_ERR_decompress, decompress_gzip) \
+ x(BCH_ERR_decompress, decompress_zstd_src_len_bad) \
+ x(BCH_ERR_decompress, decompress_zstd) \
+ x(EIO, data_write) \
+ x(BCH_ERR_data_write, data_write_io) \
+ x(BCH_ERR_data_write, data_write_csum) \
+ x(BCH_ERR_data_write, data_write_invalid_ptr) \
+ x(BCH_ERR_data_write, data_write_misaligned) \
+ x(EIO, data_read) \
x(BCH_ERR_data_read, no_device_to_read_from) \
x(BCH_ERR_data_read, data_read_io_err) \
x(BCH_ERR_data_read, data_read_csum_err) \
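
For context, the BCH_ERRCODES() x-macro table above is what drives both the enum and the class matching: each entry names its parent, and matching walks parent links until it reaches a plain errno. An abridged sketch of how the table is consumed (simplified from fs/bcachefs/errcode.h and errcode.c; the upstream versions add BUG_ON() bounds checks):

	enum bch_errcode {
		BCH_ERR_START = MAX_ERRNO + 1,	/* private codes live above real errnos */
	#define x(class, err) BCH_ERR_##err,
		BCH_ERRCODES()
	#undef x
		BCH_ERR_MAX
	};

	/* each code records its parent, so matching is a walk up the chain */
	static const u16 bch2_errcode_parents[] = {
	#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = class,
		BCH_ERRCODES()
	#undef x
	};

	bool __bch2_err_matches(int err, int class)
	{
		err	= abs(err);
		class	= abs(class);

		while (err >= BCH_ERR_START && err != class)
			err = bch2_errcode_parents[err - BCH_ERR_START];

		return err == class;
	}

This is why -BCH_ERR_decompress_lz4 matches both BCH_ERR_decompress and EIO, while -BCH_ERR_data_write_csum matches BCH_ERR_data_write and EIO but not the decompress class.
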
bch2_fs_inconsistent(c,
"inode %llu:%u not found when deleting",
inum.inum, snapshot);
- ret = -EIO;
+ ret = -BCH_ERR_ENOENT_inode;
goto err;
}
bch2_fs_inconsistent(c,
"inode %llu:%u not found when deleting",
inum, snapshot);
- ret = -EIO;
+ ret = -BCH_ERR_ENOENT_inode;
goto err;
}
test_bit(ptr->dev, op->failed.d));
if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
- return -EIO;
+ return -BCH_ERR_data_write_io;
}
if (dst != src)
out:
/* If a bucket wasn't written, we can't erasure code it: */
for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
- bch2_open_bucket_write_error(c, &op->open_buckets, dev);
+ bch2_open_bucket_write_error(c, &op->open_buckets, dev, -BCH_ERR_data_write_io);
bch2_open_buckets_put(c, &op->open_buckets);
return;
return 0;
csum_err:
bch2_write_csum_err_msg(op);
- return -EIO;
+ return -BCH_ERR_data_write_csum;
}
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
return more;
csum_err:
bch2_write_csum_err_msg(op);
- ret = -EIO;
+ ret = -BCH_ERR_data_write_csum;
err:
if (to_wbio(dst)->bounce)
bch2_bio_free_pages_pool(c, dst);
static void __bch2_nocow_write_done(struct bch_write_op *op)
{
if (unlikely(op->flags & BCH_WRITE_io_error)) {
- op->error = -EIO;
+ op->error = -BCH_ERR_data_write_io;
} else if (unlikely(op->flags & BCH_WRITE_convert_unwritten))
bch2_nocow_write_convert_unwritten(op);
}
"pointer to invalid bucket in nocow path on device %llu\n %s",
stale_at->b.inode,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_data_write_invalid_ptr;
} else {
/* We can retry this: */
ret = -BCH_ERR_transaction_restart;
if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
bch2_write_op_error(op, op->pos.offset, "misaligned write");
- op->error = -EIO;
+ op->error = -BCH_ERR_data_write_misaligned;
goto err;
}
if (!w->devs_written.nr) {
bch_err(c, "unable to write journal to sufficient devices");
- err = -EIO;
+ err = -BCH_ERR_journal_write_err;
} else {
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
w->devs_written);
* @j: journal object
* @direct: direct or background reclaim?
* @kicked: requested to run since we last ran?
- * Returns: 0 on success, or -EIO if the journal has been shutdown
+ * Returns: 0 on success, or an error code if the journal has been shut down
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
if (kthread && kthread_should_stop())
break;
- if (bch2_journal_error(j)) {
- ret = -EIO;
+ ret = bch2_journal_error(j);
+ if (ret)
break;
- }
bch2_journal_do_discards(j);
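
The reclaim loop above now propagates whatever bch2_journal_error() reports instead of flattening it to -EIO. A sketch of the helper shape this assumes (simplified from fs/bcachefs/journal.h; the shutdown sentinel check is an assumption based on the journal's reservation state):

	static inline int bch2_journal_error(struct journal *j)
	{
		/* assumption: shutdown is signalled via the entry-offset sentinel */
		return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
			? -BCH_ERR_journal_shutdown : 0;
	}
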