return failed && failed->nr;
}
-static bool ptr_being_rewritten(struct bch_read_bio *orig,
- unsigned dev,
- unsigned flags)
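+/*
+ * If this rbio is the embedded read bio of a data update, return the
+ * containing data_update; NULL for normal reads. Must not be called on a
+ * split (hence the EBUG_ON()) - only the original rbio carries the
+ * data_update bit.
+ */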
+static inline struct data_update *rbio_data_update(struct bch_read_bio *rbio)
{
- if (!(flags & BCH_READ_data_update))
+ EBUG_ON(rbio->split);
+
+ return rbio->data_update
+ ? container_of(rbio, struct data_update, rbio)
+ : NULL;
+}
+
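+/*
+ * Returns true if the containing data update is already rewriting the
+ * pointer on @dev; always false for reads that aren't part of a data
+ * update.
+ */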
+static bool ptr_being_rewritten(struct bch_read_bio *orig, unsigned dev)
+{
+ struct data_update *u = rbio_data_update(orig);
+ if (!u)
return false;
- struct data_update *u = container_of(orig, struct data_update, rbio);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(u->k.k));
unsigned i = 0;
bkey_for_each_ptr(ptrs, ptr) {
struct bpos pos,
struct extent_ptr_decoded *pick,
unsigned sectors,
- unsigned flags,
struct bch_read_bio *orig,
struct bch_io_failures *failed)
{
unsigned ptr_bit = 1;
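+	/*
+	 * rewrite pointers on devices that saw IO errors, unless the data
+	 * update is already rewriting them:
+	 */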
bkey_for_each_ptr(ptrs, ptr) {
if (bch2_dev_io_failures(failed, ptr->dev) &&
- !ptr_being_rewritten(orig, ptr->dev, flags))
+ !ptr_being_rewritten(orig, ptr->dev))
update_opts.rewrite_ptrs |= ptr_bit;
ptr_bit <<= 1;
}
k.k->type == KEY_TYPE_reflink_v
? BTREE_ID_reflink
: BTREE_ID_extents,
- k, pos, pick, sectors, flags, orig, failed);
+ k, pos, pick, sectors, orig, failed);
if (!promote)
return NULL;
if (ret)
return ret;
- if (rbio->flags & BCH_READ_data_update)
+ if (rbio->data_update)
prt_str(out, "(internal move) ");
return 0;
flags &= ~BCH_READ_last_fragment;
flags |= BCH_READ_must_clone;
- int ret = flags & BCH_READ_data_update
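+	/* data update reads retry via the nodecode path: */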
+ int ret = rbio->data_update
? bch2_read_retry_nodecode(c, rbio, iter, &failed, flags)
: __bch2_read(c, rbio, iter, inum, &failed, flags);
bch2_inum_offset_err_msg_trans(trans, &buf,
(subvol_inum) { subvol, read_pos.inode },
read_pos.offset << 9));
- if (rbio->flags & BCH_READ_data_update)
+ if (rbio->data_update)
prt_str(&buf, "(internal move) ");
prt_str(&buf, "successful retry");
container_of(work, struct bch_read_bio, work);
struct bch_fs *c = rbio->c;
struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
- struct bio *src = &rbio->bio;
- struct bio *dst = &bch2_rbio_parent(rbio)->bio;
- struct bvec_iter dst_iter = rbio->bvec_iter;
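+	/* the data_update bit lives on the original rbio, never on a split: */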
+ struct bch_read_bio *parent = bch2_rbio_parent(rbio);
+ struct bio *src = &rbio->bio;
+ struct bio *dst = &parent->bio;
+ struct bvec_iter dst_iter = rbio->bvec_iter;
struct bch_extent_crc_unpacked crc = rbio->pick.crc;
struct nonce nonce = extent_nonce(rbio->version, crc);
unsigned nofs_flags;
if (unlikely(rbio->narrow_crcs))
bch2_rbio_narrow_crcs(rbio);
- if (likely(!(rbio->flags & BCH_READ_data_update))) {
+ if (likely(!parent->data_update)) {
/* Adjust crc to point to subset of data we want: */
crc.offset += rbio->offset_into_extent;
crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
struct bch_read_bio *rbio = NULL;
bool bounce = false, read_full = false, narrow_crcs = false;
struct bpos data_pos = bkey_start_pos(k.k);
+ struct data_update *u = rbio_data_update(orig);
int ret = 0;
if (bkey_extent_is_inline_data(k.k)) {
goto retry_pick;
}
- if (!(flags & BCH_READ_data_update)) {
+ if (likely(!u)) {
if (!(flags & BCH_READ_last_fragment) ||
bio_flagged(&orig->bio, BIO_CHAIN))
flags |= BCH_READ_must_clone;
bounce = true;
}
} else {
- read_full = true;
/*
* can happen if we retry, and the extent we were going to read
* has been merged in the meantime:
*/
- struct data_update *u = container_of(orig, struct data_update, rbio);
if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
if (ca)
percpu_ref_put(&ca->io_ref);
}
iter.bi_size = pick.crc.compressed_size << 9;
+ read_full = true;
}
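+	/* promotes are also used to rewrite data after failed reads: */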
if (orig->opts.promote_target || have_io_error(failed))
if (rbio->bounce)
trace_and_count(c, io_read_bounce, &rbio->bio);
- if (!(flags & BCH_READ_data_update))
+ if (!u)
this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
else
this_cpu_add(c->counters[BCH_COUNTER_io_move_read], bio_sectors(&rbio->bio));
* If it's being moved internally, we don't want to flag it as a cache
* hit:
*/
- if (ca && pick.ptr.cached && !(flags & BCH_READ_data_update))
+ if (ca && pick.ptr.cached && !u)
bch2_bucket_io_time_reset(trans, pick.ptr.dev,
PTR_BUCKET_NR(ca, &pick.ptr), READ);
this_cpu_add(c->counters[BCH_COUNTER_io_read_hole],
bvec_iter_sectors(iter));
/*
- * won't normally happen in the BCH_READ_data_update
- * (bch2_move_extent()) path, but if we retry and the extent we wanted
- * to read no longer exists we have to signal that:
+ * won't normally happen in the data update (bch2_move_extent()) path,
+ * but if we retry and the extent we wanted to read no longer exists we
+ * have to signal that:
*/
- if (flags & BCH_READ_data_update)
+ if (u)
orig->ret = -BCH_ERR_data_read_key_overwritten;
zero_fill_bio_iter(&orig->bio, iter);
struct bkey_s_c k;
int ret;
- BUG_ON(flags & BCH_READ_data_update);
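+	/* data update reads never go through the main read path: */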
+ EBUG_ON(rbio->data_update);
bch2_bkey_buf_init(&sk);
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,