case 65 ... 128:
*idx = 4;
break;
- case 129 ... BIO_MAX_PAGES:
+ default:
*idx = 5;
break;
- default:
- return NULL;
}
void bio_reset(struct bio *bio)
{
unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+ struct block_device *bdev = bio->bi_bdev;
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
+ bio_set_dev(bio, bdev);
}
EXPORT_SYMBOL(bio_reset);
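With the hunk above, bio_reset() now preserves bi_bdev across a reset, so a bio recycled against the same device no longer needs another bio_set_dev() call (the raid5-cache recovery hunk below relies on this). A minimal sketch of the resulting behaviour, assuming the reordered bio_alloc() from this series:

	/* Sketch only: bio_reset() keeps the device set at allocation time. */
	static void example_reuse(struct block_device *bdev, sector_t sector)
	{
		struct bio *bio = bio_alloc(bdev, 1, GFP_NOIO);

		/* ... first use of the bio ... */

		bio_reset(bio);
		/* bi_bdev still points at @bdev; the iterator, op and
		 * completion fields must be initialized again. */
		bio->bi_iter.bi_sector = sector;
		bio_put(bio);
	}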
/**
* bio_alloc_bioset - allocate a bio for I/O
- * @gfp_mask: the GFP_* mask given to the slab allocator
+ * @bdev: block device to allocate the bio for.
* @nr_iovecs: number of iovecs to pre-allocate
+ * @gfp_mask: the GFP_* mask given to the slab allocator
* @bs: the bio_set to allocate from.
*
* Allocate a bio from the mempools in @bs.
*
+ * If @nr_iovecs is larger than BIO_MAX_PAGES, the allocation is capped to
+ * BIO_MAX_PAGES vectors; callers that need to transfer more data must loop
+ * and submit additional bios.
+ *
* If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
* allocate a bio. This is due to the mempool guarantees. To make this work,
* callers must never allocate more than 1 bio at a time from the general pool.
*
* Returns: Pointer to new bio on success, NULL on failure.
*/
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
- struct bio_set *bs)
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned int nr_iovecs,
+ gfp_t gfp_mask, struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
struct bio *bio;
bio_init(bio, NULL, 0);
}
+ if (bdev)
+ bio_set_dev(bio, bdev);
bio->bi_pool = bs;
return bio;
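Because nr_iovecs is now capped inside the allocator, call sites no longer need to clamp against BIO_MAX_PAGES themselves; the dm-io, xen-blkback and target/iblock hunks below all drop such clamps. A hedged sketch of the resulting call pattern (all names are assumptions):

	/* Sketch: rely on the internal cap rather than clamping at the call site. */
	static struct bio *example_alloc(struct block_device *bdev,
					 unsigned int nr_segs, struct bio_set *bs)
	{
		/*
		 * nr_segs may exceed BIO_MAX_PAGES; the allocation is silently
		 * capped, and the caller loops to issue more bios for the rest.
		 */
		return bio_alloc_bioset(bdev, nr_segs, GFP_NOIO, bs);
	}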
{
struct bio *b;
- b = bio_alloc_bioset(gfp_mask, 0, bs);
+ b = bio_alloc_bioset(bdev, 0, gfp_mask, bs);
if (!b)
return NULL;
-
- if (bdev)
- bio_set_dev(bio, bdev);
__bio_clone_fast(b, bio);
if (bio_crypt_clone(b, bio, gfp_mask) < 0)
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int op, unsigned int nr_pages, gfp_t gfp)
{
- struct bio *new = bio_alloc(gfp, nr_pages);
+ struct bio *new = bio_alloc(bdev, nr_pages, gfp);
- bio_set_dev(new, bdev);
new->bi_opf = op;
if (bio) {
* __bio_clone_fast() anyways.
*/
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
+ bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
+ gfp_mask, bs);
if (!bio)
return NULL;
- bio->bi_bdev = bio_src->bi_bdev;
+
if (bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
bio->bi_opf = bio_src->bi_opf;
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
- bio_set_dev(bio, bdev->md_bdev);
+ bio = bio_alloc_bioset(bdev->md_bdev, 1, GFP_NOIO, &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
+ struct bio *bio;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
struct page *page;
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio_set_dev(bio, device->ldev->md_bdev);
+
+ bio = bio_alloc_bioset(device->ldev->md_bdev, 1, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
- struct bio *bio = bio_alloc(GFP_NOIO, 0);
+ struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
if (!bio || !octx) {
drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
octx->device = device;
octx->ctx = ctx;
- bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
- bio = bio_alloc(GFP_NOIO, nr_pages);
+ bio = bio_alloc(device->ldev->backing_bdev, nr_pages, GFP_NOIO);
if (!bio) {
drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
goto fail;
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, device->ldev->backing_bdev);
bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
/**
* rnbd_bio_map_kern - map kernel address into bio
* @data: pointer to buffer to map
- * @bs: bio_set to use.
+ * @dev: rnbd device providing the bdev and bio_set to use.
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
*
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+struct bio *rnbd_bio_map_kern(void *data, struct rnbd_dev *dev,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
int offset, i;
struct bio *bio;
- bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
+ bio = bio_alloc_bioset(dev->bdev, nr_pages, gfp_mask, dev->ibd_bio_set);
if (!bio)
return ERR_PTR(-ENOMEM);
void rnbd_dev_bi_end_io(struct bio *bio);
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+struct bio *rnbd_bio_map_kern(void *data, struct rnbd_dev *dev,
unsigned int len, gfp_t gfp_mask);
static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
priv->id = id;
/* Generate bio with pages pointing to the rdma buffer */
- bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
+ bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev, datalen, GFP_KERNEL);
if (IS_ERR(bio)) {
err = PTR_ERR(bio);
rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
- bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
submit_bio(bio);
seg[i].nsec << 9,
seg[i].offset) == 0)) {
- int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
- bio = bio_alloc(GFP_KERNEL, nr_iovecs);
+ bio = bio_alloc(preq.bdev, nseg - i, GFP_KERNEL);
if (unlikely(bio == NULL))
goto fail_put_bio;
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
if (!bio) {
BUG_ON(operation_flags != REQ_PREFLUSH);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(preq.bdev, 0, GFP_KERNEL);
if (unlikely(bio == NULL))
goto fail_put_bio;
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio_set_op_attrs(bio, operation, operation_flags);
{
struct bio *bio;
- bio = bio_alloc(GFP_ATOMIC, 1);
+ bio = bio_alloc(zram->bdev, 1, GFP_ATOMIC);
if (!bio)
return -ENOMEM;
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- bio_set_dev(bio, zram->bdev);
if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
bio_put(bio);
return -EIO;
}
packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
- bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
+ bio = bio_alloc(pblk->disk->part0, secs_to_sync + packed_meta_pgs,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
- cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- &dc->disk.bio_split);
+ GFP_NOWAIT, &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = backing_request_endio;
*/
struct bio *flush;
- flush = bio_alloc_bioset(GFP_NOIO, 0,
+ flush = bio_alloc_bioset(bio->bi_bdev, 0, GFP_NOIO,
&dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
}
- bio_copy_dev(flush, bio);
flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
+ clone = bio_alloc_bioset(io->cc->dev->bdev, nr_iovecs, GFP_NOIO,
+ &cc->bs);
if (!clone)
goto out;
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
- bio_set_dev(clone, io->cc->dev->bdev);
clone->bi_opf = io->base_bio->bi_opf;
remaining_size = size;
num_bvecs = 1;
break;
default:
- num_bvecs = min_t(int, BIO_MAX_PAGES,
- dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+ num_bvecs = dm_sector_div_up(remaining,
+ (PAGE_SIZE >> SECTOR_SHIFT));
}
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, GFP_NOIO,
+ &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
void *ptr;
size_t ret;
- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(lc->logdev->bdev, 1, GFP_KERNEL);
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
log_end_super : log_end_io;
bio->bi_private = lc;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_pages);
+ bio = bio_alloc(lc->logdev->bdev, bio_pages, GFP_KERNEL);
if (!bio) {
DMERR("Couldn't alloc inline data bio");
goto error;
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
goto out;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(lc->logdev->bdev, block->vec_cnt, GFP_KERNEL);
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+ bio = bio_alloc(lc->logdev->bdev, block->vec_cnt - i,
+ GFP_KERNEL);
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
return;
}
- discard_parent = bio_alloc(GFP_NOIO, 1);
+ /*
+ * discard_parent is never submitted; it only serves as an anchor that
+ * the passdown bios are chained to. Point it at the pool device so
+ * that its bi_bdev is still initialized.
+ */
+ discard_parent = bio_alloc(tc->pool_dev->bdev, 1, GFP_NOIO);
if (!discard_parent) {
DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
dm_device_name(tc->pool->pool_md));
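For context, a hedged sketch of the anchor pattern that comment describes; parent, child, bdev and my_endio are all hypothetical names:

	/* Sketch: a never-submitted parent bio used purely as a completion anchor. */
	parent = bio_alloc(bdev, 1, GFP_NOIO);
	parent->bi_end_io = my_endio;
	bio_chain(child, parent);	/* each chained child holds a reference */
	submit_bio(child);
	bio_endio(parent);		/* drop the initial reference; my_endio
					 * runs once every chained child completes */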
max_pages = e->wc_list_contiguous;
- bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
+ bio = bio_alloc_bioset(wc->dev->bdev, max_pages, GFP_NOIO,
+ &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
- bio_set_dev(bio, wc->dev->bdev);
bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
if (!mblk)
return ERR_PTR(-ENOMEM);
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(dev->bdev, 1, GFP_NOIO);
if (!bio) {
dmz_free_mblock(zmd, mblk);
return ERR_PTR(-ENOMEM);
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(dev->bdev, 1, GFP_NOIO);
if (!bio) {
set_bit(DMZ_META_ERROR, &mblk->state);
return -ENOMEM;
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(dev->bdev, 1, GFP_NOIO);
if (!bio)
return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
+ clone = bio_alloc_bioset(md->disk->part0, 0, GFP_NOIO, &md->io_bs);
if (!clone)
return NULL;
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
} else {
- struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
+ struct bio *clone = bio_alloc_bioset(ci->bio->bi_bdev, 0,
+ gfp_mask,
+ &ci->io->md->bs);
if (!clone)
return NULL;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
+ bi = bio_alloc_bioset(rdev->bdev, 0, GFP_NOIO,
+ &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
if (test_bit(Faulty, &rdev->flags))
return;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
+ bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+ 1, GFP_NOIO, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
- bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
+ behind_bio = bio_alloc_bioset(bio->bi_bdev, vcnt, GFP_NOIO,
+ &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
return sectors_done;
}
- read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
-
- bio_set_dev(read_bio, rdev->bdev);
+ read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, GFP_KERNEL,
+ &mddev->bio_set);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);
+ struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_PAGES,
+ GFP_NOIO, &log->bs);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
+ ctx->ra_bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_PAGES,
+ GFP_KERNEL, &log->bs);
if (!ctx->ra_bio)
return -ENOMEM;
sector_t offset)
{
bio_reset(ctx->ra_bio);
- bio_set_dev(ctx->ra_bio, log->rdev->bdev);
bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
struct bio *prev = bio;
- bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
- &ppl_conf->bs);
+ bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_PAGES,
+ GFP_NOIO, &ppl_conf->bs);
bio->bi_opf = prev->bi_opf;
bio->bi_write_hint = prev->bi_write_hint;
- bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
struct bio *bio;
char b[BDEVNAME_SIZE];
- bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, &ppl_conf->flush_bs);
bio->bi_private = io;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio->bi_end_io = ppl_flush_endio;
* parent bio. Otherwise directly call nd_region flush.
*/
if (bio && bio->bi_iter.bi_sector != -1) {
- struct bio *child = bio_alloc(GFP_ATOMIC, 0);
+ struct bio *child = bio_alloc(bio->bi_bdev, 0, GFP_ATOMIC);
if (!child)
return -ENOMEM;
- bio_copy_dev(child, bio);
child->bi_opf = REQ_PREFLUSH;
child->bi_iter.bi_sector = -1;
bio_chain(child, bio);
if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_set_dev(bio, req->ns->bdev);
} else {
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(req->ns->bdev, sg_cnt, GFP_KERNEL);
}
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
}
}
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
- bio_set_dev(bio, req->ns->bdev);
+ bio = bio_alloc(req->ns->bdev, sg_cnt, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = op;
bio = &req->p.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
- bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(NULL, req->sg_cnt, GFP_KERNEL);
bio->bi_end_io = bio_put;
}
bio->bi_opf = req_op(rq);
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
- /*
- * Only allocate as many vector entries as the bio code allows us to,
- * we'll loop later on until we have handled the whole request.
- */
- if (sg_num > BIO_MAX_PAGES)
- sg_num = BIO_MAX_PAGES;
-
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(ib_dev->ibd_bd, sg_num, GFP_NOIO,
+ &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
- bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(ib_dev->ibd_bd, 0, GFP_KERNEL);
bio->bi_end_io = iblock_end_io_flush;
- bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+ bio = bio_alloc_bioset(bdev, nr_pages, GFP_KERNEL, &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
blk_start_plug(&plug);
for (;;) {
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint;
bio->bi_private = dio;
}
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
+ bio = bio_alloc(bdev, nr_pages, GFP_KERNEL);
}
if (!is_poll)
* to do I/O, so we don't lose the ability to do integrity
* checking.
*/
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, device->bdev);
+ bio = bio_alloc(device->bdev, 1, GFP_NOFS);
bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
bio->bi_private = device;
bio->bi_end_io = btrfs_end_super_write;
{
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
- bio_set_dev(bio, fs_info->sb->s_bdev);
+ bio = bio_alloc_bioset(fs_info->sb->s_bdev, BIO_MAX_PAGES, GFP_NOFS,
+	&btrfs_bioset);
bio->bi_iter.bi_sector = first_byte >> 9;
btrfs_io_bio_init(btrfs_io_bio(bio));
return bio;
struct bio *bio;
/* Bio allocation backed by a bioset does not fail */
- bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, nr_iovecs, GFP_NOFS, &btrfs_bioset);
btrfs_io_bio_init(btrfs_io_bio(bio));
return bio;
}
if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
clear_buffer_write_io_error(bh);
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(bh->b_bdev, 1, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
int num_pages = 0;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_PAGES, GFP_NOFS);
while (len) {
unsigned int blocks_this_page = min(len, blocks_per_page);
if (num_pages == 0) {
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
- bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector =
pblk << (blockbits - SECTOR_SHIFT);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
return -EINVAL;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, nr_pages);
+ bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, GFP_NOFS);
do {
- bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
* bio_alloc() is guaranteed to return a bio when allowed to sleep and
* we request a valid number of vectors.
*/
- bio = bio_alloc(GFP_KERNEL, nr_vecs);
-
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, nr_vecs, GFP_KERNEL);
bio->bi_iter.bi_sector = first_sector;
bio_set_op_attrs(bio, dio->op, dio->op_flags);
if (dio->is_async)
if (nblocks > BIO_MAX_PAGES)
nblocks = BIO_MAX_PAGES;
- bio = bio_alloc(GFP_NOIO, nblocks);
-
+ bio = bio_alloc(sb->s_bdev, nblocks, GFP_NOIO);
bio->bi_end_io = erofs_readendio;
- bio_set_dev(bio, sb->s_bdev);
bio->bi_iter.bi_sector = (sector_t)blknr <<
LOG_SECTORS_PER_BLOCK;
bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
}
if (!bio) {
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-
+ bio = bio_alloc(sb->s_bdev, BIO_MAX_PAGES, GFP_NOIO);
bio->bi_end_io = z_erofs_decompressqueue_endio;
- bio_set_dev(bio, sb->s_bdev);
bio->bi_iter.bi_sector = (sector_t)cur <<
LOG_SECTORS_PER_BLOCK;
bio->bi_private = bi_private;
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ bio = bio_alloc(bh->b_bdev, BIO_MAX_PAGES, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
+ bio = bio_alloc(bdev, nr_pages, GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, page->index);
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio_set_op_attrs(bio, REQ_OP_READ,
struct bio *bio;
sector_t sector;
- bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
-
bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, npages, GFP_NOIO, &f2fs_bioset);
bio->bi_iter.bi_sector = sector;
if (is_read_io(fio->op)) {
struct block_device *bdev;
sector_t sector;
- bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES),
+ bdev = f2fs_target_device(sbi, blkaddr, &sector);
+ bio = bio_alloc_bioset(bdev, nr_pages,
+ for_write ? GFP_NOIO : GFP_KERNEL,
&f2fs_bioset);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
- bdev = f2fs_target_device(sbi, blkaddr, §or);
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
- struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_PAGES, GFP_NOIO);
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
- bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
{
struct bio *new;
- new = bio_alloc(GFP_NOIO, nr_iovecs);
- bio_copy_dev(new, prev);
+ new = bio_alloc(prev->bi_bdev, nr_iovecs, GFP_NOIO);
new->bi_iter.bi_sector = bio_end_sector(prev);
new->bi_opf = prev->bi_opf;
new->bi_write_hint = prev->bi_write_hint;
struct buffer_head *bh = *bhs;
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, num);
+ bio = bio_alloc(bh->b_bdev, num, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
while (num > 0) {
bh = *bhs;
if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
ClearPageDirty(page);
lock_page(page);
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(sb->s_bdev, 1, GFP_NOFS);
bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
- bio_set_dev(bio, sb->s_bdev);
bio_add_page(bio, page, PAGE_SIZE, 0);
bio->bi_end_io = end_bio_io_page;
offset = start & (io_size - 1);
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(sb->s_bdev, 1, GFP_NOIO);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, sb->s_bdev);
bio_set_op_attrs(bio, op, op_flags);
if (op != WRITE && data)
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+ ctx->bio = bio_alloc(iomap->bdev, nr_vecs, gfp);
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_readpage does.
*/
if (!ctx->bio)
- ctx->bio = bio_alloc(orig_gfp, 1);
+ ctx->bio = bio_alloc(iomap->bdev, 1, orig_gfp);
ctx->bio->bi_opf = REQ_OP_READ;
if (ctx->rac)
ctx->bio->bi_opf |= REQ_RAHEAD;
ctx->bio->bi_iter.bi_sector = sector;
- bio_set_dev(ctx->bio, iomap->bdev);
ctx->bio->bi_end_io = iomap_read_end_io;
}
struct iomap_ioend *ioend;
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
- bio_set_dev(bio, wpc->iomap.bdev);
+ bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_PAGES, GFP_NOFS,
+ &iomap_ioend_bioset);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
bio->bi_write_hint = inode->i_write_hint;
{
struct bio *new;
- new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
+ new = bio_alloc(prev->bi_bdev, BIO_MAX_PAGES, GFP_NOFS);
new->bi_iter.bi_sector = bio_end_sector(prev);
new->bi_opf = prev->bi_opf;
new->bi_write_hint = prev->bi_write_hint;
int flags = REQ_SYNC | REQ_IDLE;
struct bio *bio;
- bio = bio_alloc(GFP_KERNEL, 1);
- bio_set_dev(bio, iomap->bdev);
+ bio = bio_alloc(iomap->bdev, 1, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
goto out;
}
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- bio_set_dev(bio, iomap->bdev);
+ bio = bio_alloc(iomap->bdev, nr_pages, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio->bi_write_hint = dio->iocb->ki_hint;
bio->bi_ioprio = dio->iocb->ki_ioprio;
bp->l_flag |= lbmREAD;
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(log->bdev, 1, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio_set_dev(bio, log->bdev);
bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
jfs_info("lbmStartIO");
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(log->bdev, 1, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio_set_dev(bio, log->bdev);
bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
}
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio = bio_alloc(inode->i_sb->s_bdev, 1, GFP_NOFS);
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page;
if (bio)
submit_bio(bio);
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio = bio_alloc(inode->i_sb->s_bdev, 1, GFP_NOFS);
bio->bi_iter.bi_sector =
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
/* Restrict the given (page cache) mask for slab allocations */
gfp_flags &= GFP_KERNEL;
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc(bdev, nr_vecs, gfp_flags);
if (bio == NULL && (current->flags & PF_MEMALLOC)) {
while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc(bdev, nr_vecs, gfp_flags);
}
- if (bio) {
- bio_set_dev(bio, bdev);
+ if (bio)
bio->bi_iter.bi_sector = first_sector;
- }
return bio;
}
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
bio_end_io_t end_io, struct parallel_io *par)
{
- struct bio *bio;
+ struct bio *bio = bio_alloc(bdev, npg, GFP_NOIO);
- npg = min(npg, BIO_MAX_PAGES);
- bio = bio_alloc(GFP_NOIO, npg);
if (bio) {
bio->bi_iter.bi_sector = disk_sector;
- bio_set_dev(bio, bdev);
bio->bi_end_io = end_io;
bio->bi_private = par;
}
{
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, nr_vecs);
+ bio = bio_alloc(nilfs->ns_bdev, nr_vecs, GFP_NOIO);
if (likely(bio)) {
- bio_set_dev(bio, nilfs->ns_bdev);
bio->bi_iter.bi_sector =
start << (nilfs->ns_blocksize_bits - 9);
}
* GFP_KERNEL that the local node can get fenced. It would be
* nicest if we could pre-allocate these bios and avoid this
* all together. */
- bio = bio_alloc(GFP_ATOMIC, 16);
+ bio = bio_alloc(reg->hr_bdev, 16, GFP_ATOMIC);
if (!bio) {
mlog(ML_ERROR, "Could not alloc slots BIO!\n");
bio = ERR_PTR(-ENOMEM);
/* Must put everything in 512 byte sectors for the bio... */
bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
- bio_set_dev(bio, reg->hr_bdev);
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
bio_set_op_attrs(bio, op, op_flags);
struct bio *bio;
if (page_count <= BIO_MAX_PAGES)
- bio = bio_alloc(GFP_NOIO, page_count);
+ bio = bio_alloc(sb->s_bdev, page_count, GFP_NOIO);
else
bio = bio_kmalloc(GFP_NOIO, page_count);
if (!bio)
return -ENOMEM;
+ /* bio_kmalloc() does not take a bdev, so it must still be set here */
bio_set_dev(bio, sb->s_bdev);
bio->bi_opf = READ;
bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
*/
#include "xfs.h"
-static inline unsigned int bio_max_vecs(unsigned int count)
-{
- return min_t(unsigned, howmany(count, PAGE_SIZE), BIO_MAX_PAGES);
-}
-
int
xfs_rw_bdev(
struct block_device *bdev,
if (is_vmalloc && op == REQ_OP_WRITE)
flush_kernel_vmap_range(data, count);
- bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, howmany(left, PAGE_SIZE), GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = op | REQ_META | REQ_SYNC;
while (bio_add_page(bio, page, len, off) != len) {
struct bio *prev = bio;
- bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
- bio_copy_dev(bio, prev);
+ bio = bio_alloc(bdev, howmany(left, PAGE_SIZE),
+ GFP_KERNEL);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio->bi_opf = prev->bi_opf;
bio_chain(prev, bio);
atomic_inc(&bp->b_io_remaining);
nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
- bio = bio_alloc(GFP_NOIO, nr_pages);
+ bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, GFP_NOIO);
- bio_set_dev(bio, bp->b_target->bt_bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
if (!nr_pages)
return 0;
- bio = bio_alloc(GFP_NOFS, nr_pages);
+ bio = bio_alloc(bdev, nr_pages, GFP_NOFS);
if (!bio)
return -ENOMEM;
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = zi->i_zsector;
bio->bi_write_hint = iocb->ki_hint;
bio->bi_ioprio = iocb->ki_ioprio;
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned int nr_iovecs,
+ gfp_t gfp_mask, struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs);
extern void bio_put(struct bio *);
extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned int nr_iovecs, gfp_t gfp_mask)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
+ return bio_alloc_bioset(bdev, nr_iovecs, gfp_mask, &fs_bio_set);
}
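One behavioural note on the reordered prototype: @bdev may be NULL when the device is not known at allocation time, since bio_alloc_bioset() only calls bio_set_dev() for a non-NULL @bdev; the nvmet passthru hunk above uses this. Sketch:

	/* Sketch: allocate without a device and attach one later. */
	bio = bio_alloc(NULL, nr_pages, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio_set_dev(bio, bdev);	/* set once the target device is known */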
extern blk_qc_t submit_bio(struct bio *);
struct bio *bio;
int error = 0;
- bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
+ bio = bio_alloc(hib_resume_bdev, 1, GFP_NOIO | __GFP_HIGH);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
- bio_set_dev(bio, hib_resume_bdev);
bio_set_op_attrs(bio, op, op_flags);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
return 0;
}
- bio = bio_alloc(GFP_NOIO, 1);
- bio_set_dev(bio, sis->bdev);
+ bio = bio_alloc(sis->bdev, 1, GFP_NOIO);
bio->bi_iter.bi_sector = swap_page_sector(page);
bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
bio->bi_end_io = end_write_func;
}
ret = 0;
- bio = bio_alloc(GFP_KERNEL, 1);
- bio_set_dev(bio, sis->bdev);
+ bio = bio_alloc(sis->bdev, 1, GFP_KERNEL);
bio->bi_opf = REQ_OP_READ;
bio->bi_iter.bi_sector = swap_page_sector(page);
bio->bi_end_io = end_swap_bio_read;