* they must remember to pair any call to bio_init() with bio_uninit()
* when IO has completed, or when the bio is released.
*/
-void bio_init(struct bio *bio, struct bio_vec *table,
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs)
{
memset(bio, 0, sizeof(*bio));
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
+ if (bdev)
+ bio_set_dev(bio, bdev);
bio->bi_io_vec = table;
bio->bi_max_vecs = max_vecs;
}
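/*
 * Usage sketch, not part of this patch: an on-stack bio set up with the
 * new bio_init() signature.  The caller passes the target block_device
 * up front and still pairs bio_init() with bio_uninit() once the I/O
 * has completed.  The function name and the specific single-page read
 * are hypothetical.
 */
static int example_read_first_page(struct block_device *bdev, struct page *page)
{
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = 0;
	bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return ret;
}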
goto err_free;
bio->bi_flags |= idx << BVEC_POOL_OFFSET;
- bio_init(bio, bvl, bvec_nr_vecs(idx));
+ bio_init(bio, bdev, bvl, bvec_nr_vecs(idx));
} else if (nr_iovecs) {
- bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+ bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS);
} else {
- bio_init(bio, NULL, 0);
+ bio_init(bio, bdev, NULL, 0);
}
- if (bdev)
- bio_set_dev(bio, bdev);
bio->bi_pool = bs;
return bio;
bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
if (unlikely(!bio))
return NULL;
- bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
- if (bdev)
- bio_set_dev(bio, bdev);
+ bio_init(bio, bdev, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
bio->bi_pool = NULL;
return bio;
}
{
struct bio bio;
- bio_init(&bio, NULL, 0);
- bio_set_dev(&bio, bdev);
+ bio_init(&bio, bdev, NULL, 0);
bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return submit_bio_wait(&bio);
}
cbdata.drive = drive;
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, bdev);
+ bio_init(&bio, bdev, &bio_vec, 1);
bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
continue;
}
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, zram->bdev);
+ bio_init(&bio, zram->bdev, &bio_vec, 1);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
if (!page)
return -ENOMEM;
- bio_init(&bio, &bio_vec, 1);
+ bio_init(&bio, NULL, &bio_vec, 1);
bio_add_page(&bio, page, PAGE_SIZE, 0);
bio_set_op_attrs(&bio, REQ_OP_READ, 0);
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
- bio_set_dev(bio, c->cache->bdev);
-
+ bio_init(bio, c->cache->bdev, bio->bi_inline_vecs,
+ meta_bucket_pages(&c->cache->sb));
return bio;
}
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, bio->bi_inline_vecs, 1);
+ bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1);
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
bio_reset(bio);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, bio->bi_inline_vecs,
+ bio_init(bio, io->op.c->cache->bdev, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
- bio_set_dev(bio, io->op.c->cache->bdev);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
{
struct bio *bio = &s->bio.bio;
- bio_init(bio, NULL, 0);
- bio->bi_bdev = orig_bio->bi_bdev;
+ bio_init(bio, orig_bio->bi_bdev, NULL, 0);
__bio_clone_fast(bio, orig_bio);
/*
* bi_end_io can be set separately somewhere else, e.g. the
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_init(bio, dc->sb_bv, 1);
- bio_set_dev(bio, dc->bdev);
+ bio_init(bio, dc->bdev, dc->sb_bv, 1);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
if (ca->sb.version < version)
ca->sb.version = version;
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
+ bio_init(bio, ca->bdev, ca->sb_bv, 1);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
+ bio_init(&ca->journal.bio, ca->bdev, ca->journal.bio.bi_inline_vecs, 8);
/*
* when ca->sb.njournal_buckets is not zero, journal exists,
struct bio bio;
};
-static void dirty_init(struct keybuf_key *w)
+static void dirty_init(struct keybuf_key *w, struct block_device *bdev)
{
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, bio->bi_inline_vecs,
+ bio_init(bio, bdev, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
* to clean up.
*/
if (KEY_DIRTY(&w->key)) {
- dirty_init(w);
+ dirty_init(w, io->dc->bdev);
bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
- bio_set_dev(&io->bio, io->dc->bdev);
io->bio.bi_end_io = dirty_endio;
/* I/O request sent to backing device */
io->dc = dc;
io->sequence = sequence++;
- dirty_init(w);
+ dirty_init(w, dc->disk.c->cache->bdev);
bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
io->bio.bi_end_io = read_dirty_endio;
if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
struct bio *flush_bio = &s->flush_bio;
bio_reset(flush_bio);
bio_set_dev(flush_bio, s->origin->bdev);
flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return submit_bio_wait(flush_bio);
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
- bio_init(&s->flush_bio, NULL, 0);
+ bio_init(&s->flush_bio, s->origin->bdev, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
- bio_init(&pool->flush_bio, NULL, 0);
+ bio_init(&pool->flush_bio, data_dev, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
struct bio *flush_bio = &pool->flush_bio;
bio_reset(flush_bio);
bio_set_dev(flush_bio, pool->data_dev);
flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return submit_bio_wait(flush_bio);
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, NULL, 0);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- bio_set_dev(&flush_bio, ci->io->md->disk->part0);
ci->bio = &flush_bio;
ci->sector_count = 0;
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio, NULL, 0);
+ bio_init(&mp_bh->bio, NULL, NULL, 0);
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
struct bio bio;
struct bio_vec bvec;
- bio_init(&bio, &bvec, 1);
-
- if (metadata_op && rdev->meta_bdev)
- bio_set_dev(&bio, rdev->meta_bdev);
- else
- bio_set_dev(&bio, rdev->bdev);
+ bio_init(&bio, (metadata_op && rdev->meta_bdev) ?
+ rdev->meta_bdev : rdev->bdev, &bvec, 1);
bio.bi_opf = op | op_flags;
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
if (!do_flush)
return;
bio_reset(&log->flush_bio);
bio_set_dev(&log->flush_bio, log->rdev->bdev);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio, NULL, 0);
+ bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
atomic_set(&io->pending_flushes, 0);
- bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
+ bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS);
pplhdr = page_address(io->header_page);
clear_page(pplhdr);
bio->bi_end_io = ppl_log_endio;
bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
bio->bi_write_hint = ppl_conf->write_hint;
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req, &dev->vec, 1);
- bio_init(&dev->rreq, &dev->rvec, 1);
+ /*
+ * No bdev is known at this point; it is set later,
+ * when the request is actually issued.
+ */
+ bio_init(&dev->req, NULL, &dev->vec, 1);
+ bio_init(&dev->rreq, NULL, &dev->rvec, 1);
}
if (raid5_has_ppl(conf)) {
if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec));
} else {
bio = bio_alloc(req->ns->bdev, sg_cnt, GFP_KERNEL);
}
if (!nvmet_check_transfer_len(req, 0))
return;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec));
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->p.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, NULL, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec));
} else {
bio = bio_alloc(NULL, req->sg_cnt, GFP_KERNEL);
bio->bi_end_io = bio_put;
return -ENOMEM;
}
- bio_init(&bio, vecs, nr_pages);
- bio_set_dev(&bio, bdev);
+ bio_init(&bio, bdev, vecs, nr_pages);
bio.bi_iter.bi_sector = pos >> 9;
bio.bi_write_hint = iocb->ki_hint;
bio.bi_private = current;
struct bio_vec bvec;
struct bio bio;
- bio_init(&bio, &bvec, 1);
+ bio_init(&bio, iomap->bdev, &bvec, 1);
bio.bi_opf = REQ_OP_READ;
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
- bio_set_dev(&bio, iomap->bdev);
__bio_add_page(&bio, page, plen, poff);
return submit_bio_wait(&bio);
}
return;
}
- bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
- bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
+ bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
+ howmany(count, PAGE_SIZE));
iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
iclog->ic_bio.bi_end_io = xlog_bio_end_io;
iclog->ic_bio.bi_private = iclog;
if (!page)
return -ENOMEM;
- bio_init(&bio, &bio_vec, 1);
+ bio_init(&bio, sb->s_bdev, &bio_vec, 1);
bio.bi_iter.bi_sector = 0;
bio.bi_opf = REQ_OP_READ;
- bio_set_dev(&bio, sb->s_bdev);
bio_add_page(&bio, page, PAGE_SIZE, 0);
ret = submit_bio_wait(&bio);
extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);