v->keys.ops = b->keys.ops;
bio = bch_bbio_alloc(b->c);
- bio_set_dev(bio, b->c->cache->bdev);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
bio->bi_opf = REQ_OP_READ | REQ_META;
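
With bch_bbio_alloc() taking over the device assignment (next hunk), the explicit bio_set_dev() after the allocation in bch_btree_verify() becomes redundant and can simply be dropped; there is no local cache_set variable in this function, so the removed line goes through b->c. A sketch of the resulting read setup, recombined from the context lines above:

	bio = bch_bbio_alloc(b->c);	/* bi_bdev now set by the allocator */
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bio->bi_opf		= REQ_OP_READ | REQ_META;
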
struct bio *bio = &b->bio;
bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+ bio_set_dev(bio, c->cache->bdev);
return bio;
}
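
For reference, a minimal sketch of bch_bbio_alloc() after this change; the mempool_alloc() line is assumed from the surrounding io.c code rather than shown in this hunk:

	struct bio *bch_bbio_alloc(struct cache_set *c)
	{
		struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
		struct bio *bio = &b->bio;

		/* initialize the embedded bio, then point it at the cache device */
		bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
		bio_set_dev(bio, c->cache->bdev);

		return bio;
	}

Every bbio handed out by the allocator now already points at the cache device, so callers that only go through bch_bbio_alloc() need no bio_set_dev() of their own.
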
struct bbio *b = container_of(bio, struct bbio, bio);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
- bio_set_dev(bio, c->cache->bdev);
-
b->submit_time_us = local_clock_us();
closure_bio_submit(c, bio, bio->bi_private);
}
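
This leaves __bch_submit_bbio() as a pure submission helper; a sketch of the function after the hunk, assuming the signature from io.c:

	static void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
	{
		struct bbio *b = container_of(bio, struct bbio, bio);

		/* the device must already be set by whoever built the bio */
		bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
		b->submit_time_us = local_clock_us();
		closure_bio_submit(c, bio, bio->bi_private);
	}

The cost of dropping the bio_set_dev() here is that every bio reaching this helper must already have bi_bdev set, which the remaining hunks ensure for the bios that are bio_init()ed outside of bch_bbio_alloc().
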
bio_init(bio, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
+ bio_set_dev(bio, io->op.c->cache->bdev);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
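
Moving-GC bios are initialized with bio_init() directly instead of coming from bch_bbio_alloc(), so moving_init() now has to name the device itself; io->op.c is the cache_set the moving_io was set up against, so io->op.c->cache->bdev resolves to the same cache device the submit path used before. A sketch of the function after this hunk, with the trailing lines assumed from the surrounding movinggc.c code:

	static void moving_init(struct moving_io *io)
	{
		struct bio *bio = &io->bio.bio;

		bio_init(bio, bio->bi_inline_vecs,
			 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
		bio_set_dev(bio, io->op.c->cache->bdev);
		bio_get(bio);
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

		bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
		bio->bi_private = &io->cl;
		bch_bio_map(bio, NULL);
	}
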
* and reread from the backing device (but we don't pass that
* error up anywhere).
*/
-
+ bio_set_dev(n, b->c->cache->bdev);
__bch_submit_bbio(n, b->c);
return n == bio ? MAP_DONE : MAP_CONTINUE;
}
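
Here the submitted bio is a split of the incoming request (n == bio when the key covered the whole remainder, hence MAP_DONE), so it never passed through bch_bbio_alloc() and must get its device assigned right before submission. A sketch of the resulting tail of cache_lookup_fn(), with the split assumed to come from bio_next_split() as in the surrounding request.c code:

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	/* ... copy the relevant pointer into the bbio's key ... */

	bio_set_dev(n, b->c->cache->bdev);
	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
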