From: Christoph Hellwig <hch@lst.de>
Date: Fri, 29 Jan 2021 05:51:48 +0000 (+0100)
Subject: block: pass a bdev argument to bio_alloc{,_bioset}
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=ccde3ad3ca313b3cdeaf508b372f9d34b81609da;p=users%2Fhch%2Fblock.git

block: pass a bdev argument to bio_alloc{,_bioset}

Clean up some code and prepare for always having a valid bi_bdev in the
bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
diff --git a/block/bio.c b/block/bio.c
index 1724cdef64dd..9baaa18c2d34 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -183,11 +183,9 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 	case 65 ... 128:
 		*idx = 4;
 		break;
-	case 129 ... BIO_MAX_PAGES:
+	default:
 		*idx = 5;
 		break;
-	default:
-		return NULL;
 	}
 
 	/*
@@ -291,12 +289,15 @@ EXPORT_SYMBOL(bio_init);
 void bio_reset(struct bio *bio)
 {
 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+	struct block_device *bdev = bio->bi_bdev;
 
 	bio_uninit(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
 	bio->bi_flags = flags;
 	atomic_set(&bio->__bi_remaining, 1);
+	if (bdev)
+		bio_set_dev(bio, bdev);
 }
 EXPORT_SYMBOL(bio_reset);
 
@@ -392,12 +392,16 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 
 /**
  * bio_alloc_bioset - allocate a bio for I/O
- * @gfp_mask: the GFP_* mask given to the slab allocator
+ * @bdev: block device to allocate the bio for.
  * @nr_iovecs: number of iovecs to pre-allocate
+ * @gfp_mask: the GFP_* mask given to the slab allocator
  * @bs: the bio_set to allocate from.
  *
  * Allocate a bio from the mempools in @bs.
  *
+ * If @nr_iovecs is larger than BIO_MAX_PAGES, the bio will contain no more
+ * than BIO_MAX_PAGES segments.
+ *
  * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
  * allocate a bio. This is due to the mempool guarantees. To make this work,
  * callers must never allocate more than 1 bio at a time from the general pool.
@@ -422,8 +426,8 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  *
  * Returns: Pointer to new bio on success, NULL on failure.
  */
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
-			     struct bio_set *bs)
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned int nr_iovecs,
+		gfp_t gfp_mask, struct bio_set *bs)
 {
 	gfp_t saved_gfp = gfp_mask;
 	struct bio *bio;
@@ -490,6 +494,8 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 		bio_init(bio, NULL, 0);
 	}
 
+	if (bdev)
+		bio_set_dev(bio, bdev);
 	bio->bi_pool = bs;
 
 	return bio;
@@ -695,12 +701,9 @@ struct bio *bio_clone_fast(struct block_device *bdev, struct bio *bio,
 {
 	struct bio *b;
 
-	b = bio_alloc_bioset(gfp_mask, 0, bs);
+	b = bio_alloc_bioset(bdev, 0, gfp_mask, bs);
 	if (!b)
 		return NULL;
-
-	if (bdev)
-		bio_set_dev(bio, bdev);
 	__bio_clone_fast(b, bio);
 
 	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index a7e75167003d..08ea0c590a62 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -13,9 +13,8 @@ struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
 		unsigned int op, unsigned int nr_pages, gfp_t gfp)
 {
-	struct bio *new = bio_alloc(gfp, nr_pages);
+	struct bio *new = bio_alloc(bdev, nr_pages, gfp);
 
-	bio_set_dev(new, bdev);
 	new->bi_opf = op;
 
 	if (bio) {
diff --git a/block/bounce.c b/block/bounce.c
index fc55314aa426..85ba3fd15cea 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -243,10 +243,11 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
 	 *    __bio_clone_fast() anyways.
*/ - bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); + bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src), + gfp_mask, bs); if (!bio) return NULL; - bio->bi_bdev = bio_src->bi_bdev; + if (bio_flagged(bio_src, BIO_REMAPPED)) bio_set_flag(bio, BIO_REMAPPED); bio->bi_opf = bio_src->bi_opf; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 72cf7603d51f..d903fa551e20 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -138,8 +138,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, op_flags |= REQ_FUA | REQ_PREFLUSH; op_flags |= REQ_SYNC; - bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); - bio_set_dev(bio, bdev->md_bdev); + bio = bio_alloc_bioset(bdev->md_bdev, 1, GFP_NOIO, &drbd_md_io_bio_set); bio->bi_iter.bi_sector = sector; err = -EIO; if (bio_add_page(bio, device->md_io.page, size, 0) != size) diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index c1f816f896a8..240b8c46b4d0 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -976,7 +976,7 @@ static void drbd_bm_endio(struct bio *bio) static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); + struct bio *bio; struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; struct page *page; @@ -1006,7 +1006,9 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bm_store_page_idx(page, page_nr); } else page = b->bm_pages[page_nr]; - bio_set_dev(bio, device->ldev->md_bdev); + + bio = bio_alloc_bioset(device->ldev->md_bdev, 1, GFP_NOIO, + &drbd_md_io_bio_set); bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? */ diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 09c86ef3f0fd..533bedebd8c6 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1277,7 +1277,7 @@ static void one_flush_endio(struct bio *bio) static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx) { - struct bio *bio = bio_alloc(GFP_NOIO, 0); + struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0, GFP_NOIO); struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO); if (!bio || !octx) { drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n"); @@ -1296,7 +1296,6 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont octx->device = device; octx->ctx = ctx; - bio_set_dev(bio, device->ldev->backing_bdev); bio->bi_private = octx; bio->bi_end_io = one_flush_endio; bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; @@ -1687,7 +1686,7 @@ int drbd_submit_peer_request(struct drbd_device *device, * generated bio, but a bio allocated on behalf of the peer. 
 	 */
 next_bio:
-	bio = bio_alloc(GFP_NOIO, nr_pages);
+	bio = bio_alloc(device->ldev->backing_bdev, nr_pages, GFP_NOIO);
 	if (!bio) {
 		drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
 		goto fail;
 	}
@@ -1695,7 +1694,6 @@ next_bio:
 	/* > peer_req->i.sector, unless this is the first bio */
 	bio->bi_iter.bi_sector = sector;
-	bio_set_dev(bio, device->ldev->backing_bdev);
 	bio_set_op_attrs(bio, op, op_flags);
 	bio->bi_private = peer_req;
 	bio->bi_end_io = drbd_peer_request_endio;
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index b241a099aeae..3fecc989beec 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -56,14 +56,14 @@ void rnbd_dev_bi_end_io(struct bio *bio)
 /**
  * rnbd_bio_map_kern - map kernel address into bio
  * @data: pointer to buffer to map
- * @bs: bio_set to use.
+ * @dev: device to use.
  * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+struct bio *rnbd_bio_map_kern(void *data, struct rnbd_dev *dev,
 			      unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
@@ -73,7 +73,7 @@ struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
 	int offset, i;
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
+	bio = bio_alloc_bioset(dev->bdev, nr_pages, gfp_mask, dev->ibd_bio_set);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 0eb23850afb9..f699a0db0a2f 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -43,7 +43,7 @@ void rnbd_endio(void *priv, int error);
 
 void rnbd_dev_bi_end_io(struct bio *bio);
 
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+struct bio *rnbd_bio_map_kern(void *data, struct rnbd_dev *dev,
 			      unsigned int len, gfp_t gfp_mask);
 
 static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index a6a68d44f517..17b5b6c99482 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -146,7 +146,7 @@ static int process_rdma(struct rtrs_srv *sess,
 	priv->id = id;
 
 	/* Generate bio with pages pointing to the rdma buffer */
-	bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
+	bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev, datalen, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		err = PTR_ERR(bio);
 		rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
@@ -165,7 +165,6 @@ static int process_rdma(struct rtrs_srv *sess,
 	prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
 	       usrlen < sizeof(*msg) ?
 	       0 : le16_to_cpu(msg->prio);
 	bio_set_prio(bio, prio);
-	bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
 
 	submit_bio(bio);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 9ebf53903d7b..29f901c181bb 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1323,13 +1323,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 				     seg[i].nsec << 9,
 				     seg[i].offset) == 0)) {
-			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
-			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
+			bio = bio_alloc(preq.bdev, nseg - i, GFP_KERNEL);
 			if (unlikely(bio == NULL))
 				goto fail_put_bio;
 
 			biolist[nbio++] = bio;
-			bio_set_dev(bio, preq.bdev);
 			bio->bi_private = pending_req;
 			bio->bi_end_io  = end_block_io_op;
 			bio->bi_iter.bi_sector  = preq.sector_number;
@@ -1343,12 +1341,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	if (!bio) {
 		BUG_ON(operation_flags != REQ_PREFLUSH);
 
-		bio = bio_alloc(GFP_KERNEL, 0);
+		bio = bio_alloc(preq.bdev, 0, GFP_KERNEL);
 		if (unlikely(bio == NULL))
 			goto fail_put_bio;
 
 		biolist[nbio++] = bio;
-		bio_set_dev(bio, preq.bdev);
 		bio->bi_private = pending_req;
 		bio->bi_end_io  = end_block_io_op;
 		bio_set_op_attrs(bio, operation, operation_flags);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d6243dbc53cc..7ee6adf8fbf8 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -588,12 +588,11 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
 {
 	struct bio *bio;
 
-	bio = bio_alloc(GFP_ATOMIC, 1);
+	bio = bio_alloc(zram->bdev, 1, GFP_ATOMIC);
 	if (!bio)
 		return -ENOMEM;
 
 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-	bio_set_dev(bio, zram->bdev);
 	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
 		bio_put(bio);
 		return -EIO;
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index b9a2aeba95ab..3e9ae030c847 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -611,7 +611,8 @@ static int pblk_submit_write(struct pblk *pblk, int *secs_left)
 	}
 
 	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
-	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
+	bio = bio_alloc(pblk->disk->part0, secs_to_sync + packed_meta_pgs,
+			GFP_KERNEL);
 
 	bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 286a021569be..84917158da7c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -917,14 +917,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		/* btree_search_recurse()'s btree iterator is no good anymore */
 		ret = miss == bio ?
MAP_DONE : -EINTR; - cache_bio = bio_alloc_bioset(GFP_NOWAIT, + cache_bio = bio_alloc_bioset(miss->bi_bdev, DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), - &dc->disk.bio_split); + GFP_NOWAIT, &dc->disk.bio_split); if (!cache_bio) goto out_submit; cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; - bio_copy_dev(cache_bio, miss); cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = backing_request_endio; @@ -1032,13 +1031,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) */ struct bio *flush; - flush = bio_alloc_bioset(GFP_NOIO, 0, + flush = bio_alloc_bioset(bio->bi_bdev, 0, GFP_NOIO, &dc->disk.bio_split); if (!flush) { s->iop.status = BLK_STS_RESOURCE; goto insert_data; } - bio_copy_dev(flush, bio); flush->bi_end_io = backing_request_endio; flush->bi_private = cl; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d0d6b7e568b9..edcfab3f445c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1664,13 +1664,13 @@ retry: if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) mutex_lock(&cc->bio_alloc_lock); - clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); + clone = bio_alloc_bioset(io->cc->dev->bdev, nr_iovecs, GFP_NOIO, + &cc->bs); if (!clone) goto out; clone->bi_private = io; clone->bi_end_io = crypt_endio; - bio_set_dev(clone, io->cc->dev->bdev); clone->bi_opf = io->base_bio->bi_opf; remaining_size = size; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 4312007d2d34..87b1463119b8 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -341,13 +341,13 @@ static void do_region(int op, int op_flags, unsigned region, num_bvecs = 1; break; default: - num_bvecs = min_t(int, BIO_MAX_PAGES, - dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); + num_bvecs = dm_sector_div_up(remaining, + (PAGE_SIZE >> SECTOR_SHIFT)); } - bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios); + bio = bio_alloc_bioset(where->bdev, num_bvecs, GFP_NOIO, + &io->client->bios); bio->bi_iter.bi_sector = where->sector + (where->count - remaining); - bio_set_dev(bio, where->bdev); bio->bi_end_io = endio; bio_set_op_attrs(bio, op, op_flags); store_io_and_region_in_bio(bio, io, region); diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index e3d35c6c9f71..b0427305af31 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -217,14 +217,13 @@ static int write_metadata(struct log_writes_c *lc, void *entry, void *ptr; size_t ret; - bio = bio_alloc(GFP_KERNEL, 1); + bio = bio_alloc(lc->logdev->bdev, 1, GFP_KERNEL); if (!bio) { DMERR("Couldn't alloc log bio"); goto error; } bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? 
log_end_super : log_end_io; bio->bi_private = lc; @@ -276,7 +275,7 @@ static int write_inline_data(struct log_writes_c *lc, void *entry, atomic_inc(&lc->io_blocks); - bio = bio_alloc(GFP_KERNEL, bio_pages); + bio = bio_alloc(lc->logdev->bdev, bio_pages, GFP_KERNEL); if (!bio) { DMERR("Couldn't alloc inline data bio"); goto error; @@ -284,7 +283,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry, bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -364,14 +362,13 @@ static int log_one_block(struct log_writes_c *lc, goto out; atomic_inc(&lc->io_blocks); - bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); + bio = bio_alloc(lc->logdev->bdev, block->vec_cnt, GFP_KERNEL); if (!bio) { DMERR("Couldn't alloc log bio"); goto error; } bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -386,14 +383,14 @@ static int log_one_block(struct log_writes_c *lc, if (ret != block->vecs[i].bv_len) { atomic_inc(&lc->io_blocks); submit_bio(bio); - bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES)); + bio = bio_alloc(lc->logdev->bdev, block->vec_cnt - i, + GFP_KERNEL); if (!bio) { DMERR("Couldn't alloc log bio"); goto error; } bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fff4c50df74d..ebb8d72939a4 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1179,7 +1179,11 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) return; } - discard_parent = bio_alloc(GFP_NOIO, 1); + /* + * discard_parent is never submitted, but just used as an anchor for + * chaining. Set the device to the pool_dev so that it is initialized. + */ + discard_parent = bio_alloc(m->tc->pool_dev->bdev, 1, GFP_NOIO); if (!discard_parent) { DMWARN("%s: unable to allocate top level discard bio for passdown. 
Skipping passdown.", dm_device_name(tc->pool->pool_md)); diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index d5223a0e5cc5..3415e65fdfd0 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1674,11 +1674,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba max_pages = e->wc_list_contiguous; - bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); + bio = bio_alloc_bioset(wc->dev->bdev, max_pages, GFP_NOIO, + &wc->bio_set); wb = container_of(bio, struct writeback_struct, bio); wb->wc = wc; bio->bi_end_io = writecache_writeback_endio; - bio_set_dev(bio, wc->dev->bdev); bio->bi_iter.bi_sector = read_original_sector(wc, e); if (max_pages <= WB_LIST_INLINE || unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 039d17b28938..21a28e52a1e4 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -550,7 +550,7 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, if (!mblk) return ERR_PTR(-ENOMEM); - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(dev->bdev, 1, GFP_NOIO); if (!bio) { dmz_free_mblock(zmd, mblk); return ERR_PTR(-ENOMEM); @@ -578,7 +578,6 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, /* Submit read BIO */ bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); @@ -725,7 +724,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, if (dmz_bdev_is_dying(dev)) return -EIO; - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(dev->bdev, 1, GFP_NOIO); if (!bio) { set_bit(DMZ_META_ERROR, &mblk->state); return -ENOMEM; @@ -734,7 +733,6 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, set_bit(DMZ_META_WRITING, &mblk->state); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); @@ -759,12 +757,11 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op, if (dmz_bdev_is_dying(dev)) return -EIO; - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(dev->bdev, 1, GFP_NOIO); if (!bio) return -ENOMEM; bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); ret = submit_bio_wait(bio); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 127bc70a3f8a..0787f4ee2539 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -624,7 +624,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); + clone = bio_alloc_bioset(md->disk->part0, 0, GFP_NOIO, &md->io_bs); if (!clone) return NULL; @@ -659,7 +659,9 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t /* the dm_target_io embedded in ci->io is available */ tio = &ci->io->tio; } else { - struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); + struct bio *clone = bio_alloc_bioset(ci->bio->bi_bdev, 0, + gfp_mask, + &ci->io->md->bs); if (!clone) return NULL; diff --git a/drivers/md/md.c b/drivers/md/md.c index 21da0c48f6c2..85b56b9418f9 100644 --- a/drivers/md/md.c +++ 
b/drivers/md/md.c @@ -595,10 +595,10 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set); + bi = bio_alloc_bioset(rdev->bdev, 0, GFP_NOIO, + &mddev->bio_set); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; - bio_set_dev(bi, rdev->bdev); bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; atomic_inc(&mddev->flush_pending); submit_bio(bi); @@ -981,11 +981,11 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(Faulty, &rdev->flags)) return; - bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); + bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, + 1, GFP_NOIO, &mddev->sync_set); atomic_inc(&rdev->nr_pending); - bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev); bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 32937fa5c17a..61ebba71152a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1104,7 +1104,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, int i = 0; struct bio *behind_bio = NULL; - behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); + behind_bio = bio_alloc_bioset(bio->bi_bdev, vcnt, GFP_NOIO, + &r1_bio->mddev->bio_set); if (!behind_bio) return; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index fb16ba85cc7b..e877f3f75430 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4529,9 +4529,8 @@ read_more: return sectors_done; } - read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set); - - bio_set_dev(read_bio, rdev->bdev); + read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, GFP_KERNEL, + &mddev->bio_set); read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); read_bio->bi_private = r10_bio; diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4337ae0e6af2..a1378548a4f8 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -735,10 +735,10 @@ static void r5l_submit_current_io(struct r5l_log *log) static struct bio *r5l_bio_alloc(struct r5l_log *log) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs); + struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_PAGES, + GFP_NOIO, &log->bs); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - bio_set_dev(bio, log->rdev->bdev); bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; return bio; @@ -1634,7 +1634,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, { struct page *page; - ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs); + ctx->ra_bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_PAGES, + GFP_KERNEL, &log->bs); if (!ctx->ra_bio) return -ENOMEM; @@ -1679,7 +1680,6 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, sector_t offset) { bio_reset(ctx->ra_bio); - bio_set_dev(ctx->ra_bio, log->rdev->bdev); bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index e8c118e05dfd..51a8d88295ef 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -496,11 +496,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { struct bio *prev = bio; - bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, - &ppl_conf->bs); + bio = 
bio_alloc_bioset(prev->bi_bdev, BIO_MAX_PAGES, + GFP_NOIO, &ppl_conf->bs); bio->bi_opf = prev->bi_opf; bio->bi_write_hint = prev->bi_write_hint; - bio_copy_dev(bio, prev); bio->bi_iter.bi_sector = bio_end_sector(prev); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); @@ -637,8 +636,7 @@ static void ppl_do_flush(struct ppl_io_unit *io) struct bio *bio; char b[BDEVNAME_SIZE]; - bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs); - bio_set_dev(bio, bdev); + bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, &ppl_conf->flush_bs); bio->bi_private = io; bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bio->bi_end_io = ppl_flush_endio; diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c index 10351d5b49fa..366e3a038d53 100644 --- a/drivers/nvdimm/nd_virtio.c +++ b/drivers/nvdimm/nd_virtio.c @@ -105,11 +105,10 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio) * parent bio. Otherwise directly call nd_region flush. */ if (bio && bio->bi_iter.bi_sector != -1) { - struct bio *child = bio_alloc(GFP_ATOMIC, 0); + struct bio *child = bio_alloc(bio->bi_bdev, 0, GFP_ATOMIC); if (!child) return -ENOMEM; - bio_copy_dev(child, bio); child->bi_opf = REQ_PREFLUSH; child->bi_iter.bi_sector = -1; bio_chain(child, bio); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index bf6e0ac9ad28..b7d13fe14866 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -262,10 +262,10 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { bio = &req->b.inline_bio; bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio_set_dev(bio, req->ns->bdev); } else { - bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); + bio = bio_alloc(req->ns->bdev, sg_cnt, GFP_KERNEL); } - bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sector; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; @@ -290,8 +290,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } } - bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); - bio_set_dev(bio, req->ns->bdev); + bio = bio_alloc(req->ns->bdev, sg_cnt, GFP_KERNEL); bio->bi_iter.bi_sector = sector; bio->bi_opf = op; diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index cbc88acdd233..482fe03b2dc3 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -198,7 +198,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) bio = &req->p.inline_bio; bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); } else { - bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES)); + bio = bio_alloc(NULL, req->sg_cnt, GFP_KERNEL); bio->bi_end_io = bio_put; } bio->bi_opf = req_op(rq); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 8ed93fd205c7..409e676c8f3d 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -311,20 +311,13 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); struct bio *bio; - /* - * Only allocate as many vector entries as the bio code allows us to, - * we'll loop later on until we have handled the whole request. 
- */ - if (sg_num > BIO_MAX_PAGES) - sg_num = BIO_MAX_PAGES; - - bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set); + bio = bio_alloc_bioset(ib_dev->ibd_bd, sg_num, GFP_NOIO, + &ib_dev->ibd_bio_set); if (!bio) { pr_err("Unable to allocate memory for bio\n"); return NULL; } - bio_set_dev(bio, ib_dev->ibd_bd); bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; bio->bi_iter.bi_sector = lba; @@ -379,9 +372,8 @@ iblock_execute_sync_cache(struct se_cmd *cmd) if (immed) target_complete_cmd(cmd, SAM_STAT_GOOD); - bio = bio_alloc(GFP_KERNEL, 0); + bio = bio_alloc(ib_dev->ibd_bd, 0, GFP_KERNEL); bio->bi_end_io = iblock_end_io_flush; - bio_set_dev(bio, ib_dev->ibd_bd); bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; if (!immed) bio->bi_private = cmd; diff --git a/fs/block_dev.c b/fs/block_dev.c index 9d4b1a884d76..6c4c1fca138a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -366,7 +366,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) (bdev_logical_block_size(bdev) - 1)) return -EINVAL; - bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool); + bio = bio_alloc_bioset(bdev, nr_pages, GFP_KERNEL, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); dio->is_sync = is_sync = is_sync_kiocb(iocb); @@ -389,7 +389,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) blk_start_plug(&plug); for (;;) { - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = pos >> 9; bio->bi_write_hint = iocb->ki_hint; bio->bi_private = dio; @@ -446,7 +445,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } submit_bio(bio); - bio = bio_alloc(GFP_KERNEL, nr_pages); + bio = bio_alloc(bdev, nr_pages, GFP_KERNEL); } if (!is_poll) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6b35b7e88136..b6c2524284ca 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3636,8 +3636,7 @@ static int write_dev_supers(struct btrfs_device *device, * to do I/O, so we don't lose the ability to do integrity * checking. 
 	 */
-	bio = bio_alloc(GFP_NOFS, 1);
-	bio_set_dev(bio, device->bdev);
+	bio = bio_alloc(device->bdev, 1, GFP_NOFS);
 	bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
 	bio->bi_private = device;
 	bio->bi_end_io = btrfs_end_super_write;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f391239be27e..f2728918b0df 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3006,8 +3006,8 @@ struct bio *btrfs_bio_alloc(struct btrfs_fs_info *fs_info, u64 first_byte)
 {
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
-	bio_set_dev(bio, fs_info->sb->s_bdev);
+	bio = bio_alloc_bioset(fs_info->sb->s_bdev, BIO_MAX_PAGES, GFP_NOFS,
+			       &btrfs_bioset);
 	bio->bi_iter.bi_sector = first_byte >> 9;
 	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
@@ -3032,8 +3032,7 @@ struct bio *btrfs_io_bio_alloc(struct block_device *bdev,
 	struct bio *bio;
 
 	/* Bio allocation backed by a bioset does not fail */
-	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
-	bio_set_dev(bio, bdev);
+	bio = bio_alloc_bioset(bdev, nr_iovecs, GFP_NOFS, &btrfs_bioset);
 	btrfs_io_bio_init(btrfs_io_bio(bio));
 	return bio;
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index 32647d2011df..3da67a0fc44c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3023,12 +3023,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
 		clear_buffer_write_io_error(bh);
 
-	bio = bio_alloc(GFP_NOIO, 1);
+	bio = bio_alloc(bh->b_bdev, 1, GFP_NOIO);
 
 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio_set_dev(bio, bh->b_bdev);
 	bio->bi_write_hint = write_hint;
 
 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index b048a0e38516..4ee2598958ac 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -52,7 +52,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
 	int num_pages = 0;
 
 	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
-	bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+	bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_PAGES, GFP_NOFS);
 
 	while (len) {
 		unsigned int blocks_this_page = min(len, blocks_per_page);
@@ -60,7 +60,6 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
 		if (num_pages == 0) {
 			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
-			bio_set_dev(bio, inode->i_sb->s_bdev);
 			bio->bi_iter.bi_sector =
 					pblk << (blockbits - SECTOR_SHIFT);
 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -148,10 +147,9 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 		return -EINVAL;
 
 	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
-	bio = bio_alloc(GFP_NOFS, nr_pages);
+	bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, GFP_NOFS);
 
 	do {
-		bio_set_dev(bio, inode->i_sb->s_bdev);
 		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index aa1083ecd623..9e1abe595f93 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -397,9 +397,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
 	 * we request a valid number of vectors.
 	 */
-	bio = bio_alloc(GFP_KERNEL, nr_vecs);
-
-	bio_set_dev(bio, bdev);
+	bio = bio_alloc(bdev, nr_vecs, GFP_KERNEL);
 	bio->bi_iter.bi_sector = first_sector;
 	bio_set_op_attrs(bio, dio->op, dio->op_flags);
 	if (dio->is_async)
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index ea4f693bee22..29c20ef22754 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -218,10 +218,8 @@ submit_bio_retry:
 		if (nblocks > BIO_MAX_PAGES)
 			nblocks = BIO_MAX_PAGES;
 
-		bio = bio_alloc(GFP_NOIO, nblocks);
-
+		bio = bio_alloc(sb->s_bdev, nblocks, GFP_NOIO);
 		bio->bi_end_io = erofs_readendio;
-		bio_set_dev(bio, sb->s_bdev);
 		bio->bi_iter.bi_sector = (sector_t)blknr << LOG_SECTORS_PER_BLOCK;
 		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 6cb356c4217b..2784d5d52cc0 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1235,10 +1235,8 @@ submit_bio_retry:
 		}
 
 		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-
+			bio = bio_alloc(sb->s_bdev, BIO_MAX_PAGES, GFP_NOIO);
 			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
 			bio->bi_iter.bi_sector = (sector_t)cur << LOG_SECTORS_PER_BLOCK;
 			bio->bi_private = bi_private;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 03a44a0de86a..df0a5dcb3364 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -398,10 +398,9 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
 	 * bio_alloc will _always_ be able to allocate a bio if
 	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
 	 */
-	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+	bio = bio_alloc(bh->b_bdev, BIO_MAX_PAGES, GFP_NOIO);
 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio_set_dev(bio, bh->b_bdev);
 	bio->bi_end_io = ext4_end_bio;
 	bio->bi_private = ext4_get_io_end(io->io_end);
 	io->io_bio = bio;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index f014c5e473a9..5e9ff4d6ef3e 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -371,12 +371,10 @@ int ext4_mpage_readpages(struct inode *inode,
 			 * bio_alloc will _always_ be able to allocate a bio if
 			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
 			 */
-			bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, BIO_MAX_PAGES));
+			bio = bio_alloc(bdev, nr_pages, GFP_KERNEL);
 			fscrypt_set_bio_crypt_ctx(bio, inode, next_block, GFP_KERNEL);
 			ext4_set_bio_post_read_ctx(bio, inode, page->index);
-			bio_set_dev(bio, bdev);
 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 			bio->bi_end_io = mpage_end_io;
 			bio_set_op_attrs(bio, REQ_OP_READ,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 170fa5ea8f86..8efd091e0e30 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -411,10 +411,8 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
 	struct bio *bio;
 	sector_t sector;
 
-	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
-
 	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
-	bio_set_dev(bio, bdev);
+	bio = bio_alloc_bioset(bdev, npages, GFP_NOIO, &f2fs_bioset);
 	bio->bi_iter.bi_sector = sector;
 
 	if (is_read_io(fio->op)) {
@@ -1012,16 +1010,15 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 	struct block_device *bdev;
 	sector_t sector;
 
-	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
-			       min_t(int, nr_pages, BIO_MAX_PAGES),
+	bdev = f2fs_target_device(sbi, blkaddr, &sector);
+	bio = bio_alloc_bioset(bdev, nr_pages,
+			       for_write ? GFP_NOIO : GFP_KERNEL,
 			       &f2fs_bioset);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 
 	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
 
-	bdev = f2fs_target_device(sbi, blkaddr, &sector);
-	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = f2fs_read_end_io;
 	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 3922b26264f5..f4f3f90b8946 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -262,10 +262,9 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
 				      bio_end_io_t *end_io)
 {
 	struct super_block *sb = sdp->sd_vfs;
-	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+	struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_PAGES, GFP_NOIO);
 
 	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
-	bio_set_dev(bio, sb->s_bdev);
 	bio->bi_end_io = end_io;
 	bio->bi_private = sdp;
 
@@ -481,8 +480,7 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
 {
 	struct bio *new;
 
-	new = bio_alloc(GFP_NOIO, nr_iovecs);
-	bio_copy_dev(new, prev);
+	new = bio_alloc(prev->bi_bdev, nr_iovecs, GFP_NOIO);
 	new->bi_iter.bi_sector = bio_end_sector(prev);
 	new->bi_opf = prev->bi_opf;
 	new->bi_write_hint = prev->bi_write_hint;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 2db573e31f78..3ae5cbee814b 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -216,9 +216,8 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
 		struct buffer_head *bh = *bhs;
 		struct bio *bio;
 
-		bio = bio_alloc(GFP_NOIO, num);
+		bio = bio_alloc(bh->b_bdev, num, GFP_NOIO);
 		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-		bio_set_dev(bio, bh->b_bdev);
 		while (num > 0) {
 			bh = *bhs;
 			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 61fce59cb4d3..57dbe12e0fcd 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -254,9 +254,8 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 	ClearPageDirty(page);
 	lock_page(page);
 
-	bio = bio_alloc(GFP_NOFS, 1);
+	bio = bio_alloc(sb->s_bdev, 1, GFP_NOFS);
 	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
-	bio_set_dev(bio, sb->s_bdev);
 	bio_add_page(bio, page, PAGE_SIZE, 0);
 
 	bio->bi_end_io = end_bio_io_page;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 0350dc7821bf..6b570d4055bd 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -64,9 +64,8 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
 	offset = start & (io_size - 1);
 	sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
-	bio = bio_alloc(GFP_NOIO, 1);
+	bio = bio_alloc(sb->s_bdev, 1, GFP_NOIO);
 	bio->bi_iter.bi_sector = sector;
-	bio_set_dev(bio, sb->s_bdev);
 	bio_set_op_attrs(bio, op, op_flags);
 
 	if (op != WRITE && data)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 16a1e82e3aeb..0f971179b00c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -285,19 +285,18 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		if (ctx->rac) /* same as readahead_gfp_mask */
 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+		ctx->bio = bio_alloc(iomap->bdev, nr_vecs, gfp);
 		/*
 		 * If the bio_alloc fails, try it again for a single page to
 		 * avoid having to deal with partial page reads. This emulates
 		 * what do_mpage_readpage does.
*/ if (!ctx->bio) - ctx->bio = bio_alloc(orig_gfp, 1); + ctx->bio = bio_alloc(iomap->bdev, 1, orig_gfp); ctx->bio->bi_opf = REQ_OP_READ; if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; - bio_set_dev(ctx->bio, iomap->bdev); ctx->bio->bi_end_io = iomap_read_end_io; } @@ -1221,8 +1220,8 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend; struct bio *bio; - bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset); - bio_set_dev(bio, wpc->iomap.bdev); + bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_PAGES, GFP_NOFS, + &iomap_ioend_bioset); bio->bi_iter.bi_sector = sector; bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); bio->bi_write_hint = inode->i_write_hint; @@ -1252,8 +1251,7 @@ iomap_chain_bio(struct bio *prev) { struct bio *new; - new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); - bio_copy_dev(new, prev);/* also copies over blkcg information */ + new = bio_alloc(prev->bi_bdev, BIO_MAX_PAGES, GFP_NOFS); new->bi_iter.bi_sector = bio_end_sector(prev); new->bi_opf = prev->bi_opf; new->bi_write_hint = prev->bi_write_hint; diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index ea1e8f696076..dc5ea474530c 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -189,8 +189,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, int flags = REQ_SYNC | REQ_IDLE; struct bio *bio; - bio = bio_alloc(GFP_KERNEL, 1); - bio_set_dev(bio, iomap->bdev); + bio = bio_alloc(iomap->bdev, 1, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(iomap, pos); bio->bi_private = dio; bio->bi_end_io = iomap_dio_bio_end_io; @@ -269,8 +268,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, goto out; } - bio = bio_alloc(GFP_KERNEL, nr_pages); - bio_set_dev(bio, iomap->bdev); + bio = bio_alloc(iomap->bdev, nr_pages, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(iomap, pos); bio->bi_write_hint = dio->iocb->ki_hint; bio->bi_ioprio = dio->iocb->ki_ioprio; diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 9330eff210e0..152d80ed8501 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1979,10 +1979,9 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) bp->l_flag |= lbmREAD; - bio = bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(log->bdev, 1, GFP_NOFS); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); - bio_set_dev(bio, log->bdev); bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); @@ -2124,9 +2123,8 @@ static void lbmStartIO(struct lbuf * bp) jfs_info("lbmStartIO"); - bio = bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(log->bdev, 1, GFP_NOFS); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); - bio_set_dev(bio, log->bdev); bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 176580f54af9..b1ad4718ae0d 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -416,8 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) } len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage); - bio = bio_alloc(GFP_NOFS, 1); - bio_set_dev(bio, inode->i_sb->s_bdev); + bio = bio_alloc(inode->i_sb->s_bdev, 1, GFP_NOFS); bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); bio->bi_end_io = metapage_write_end_io; bio->bi_private = page; @@ -496,8 +495,7 @@ static int metapage_readpage(struct file *fp, struct page *page) if (bio) 
submit_bio(bio); - bio = bio_alloc(GFP_NOFS, 1); - bio_set_dev(bio, inode->i_sb->s_bdev); + bio = bio_alloc(inode->i_sb->s_bdev, 1, GFP_NOFS); bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); bio->bi_end_io = metapage_read_end_io; diff --git a/fs/mpage.c b/fs/mpage.c index 830e6cc2a9e7..bfd74c0ae85c 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -76,17 +76,15 @@ mpage_alloc(struct block_device *bdev, /* Restrict the given (page cache) mask for slab allocations */ gfp_flags &= GFP_KERNEL; - bio = bio_alloc(gfp_flags, nr_vecs); + bio = bio_alloc(bdev, nr_vecs, gfp_flags); if (bio == NULL && (current->flags & PF_MEMALLOC)) { while (!bio && (nr_vecs /= 2)) - bio = bio_alloc(gfp_flags, nr_vecs); + bio = bio_alloc(bdev, nr_vecs, gfp_flags); } - if (bio) { - bio_set_dev(bio, bdev); + if (bio) bio->bi_iter.bi_sector = first_sector; - } return bio; } diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 1a96ce28efb0..6437293775f3 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -119,13 +119,10 @@ static struct bio * bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector, bio_end_io_t end_io, struct parallel_io *par) { - struct bio *bio; + struct bio *bio = bio_alloc(bdev, npg, GFP_NOIO); - npg = min(npg, BIO_MAX_PAGES); - bio = bio_alloc(GFP_NOIO, npg); if (bio) { bio->bi_iter.bi_sector = disk_sector; - bio_set_dev(bio, bdev); bio->bi_end_io = end_io; bio->bi_private = par; } diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 1e75417bfe6e..de7aaffa9fcb 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -385,9 +385,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start, { struct bio *bio; - bio = bio_alloc(GFP_NOIO, nr_vecs); + bio = bio_alloc(nilfs->ns_bdev, nr_vecs, GFP_NOIO); if (likely(bio)) { - bio_set_dev(bio, nilfs->ns_bdev); bio->bi_iter.bi_sector = start << (nilfs->ns_blocksize_bits - 9); } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 0179a73a3fa2..5a9a1b2f128b 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -520,7 +520,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, * GFP_KERNEL that the local node can get fenced. It would be * nicest if we could pre-allocate these bios and avoid this * all together. */ - bio = bio_alloc(GFP_ATOMIC, 16); + bio = bio_alloc(reg->hr_bdev, 16, GFP_ATOMIC); if (!bio) { mlog(ML_ERROR, "Could not alloc slots BIO!\n"); bio = ERR_PTR(-ENOMEM); @@ -529,7 +529,6 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, /* Must put everything in 512 byte sectors for the bio... 
 	 */
 	bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
-	bio_set_dev(bio, reg->hr_bdev);
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;
 	bio_set_op_attrs(bio, op, op_flags);
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 8a19773b5a0b..0f7a8baca618 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -88,14 +88,14 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
 	struct bio *bio;
 
 	if (page_count <= BIO_MAX_PAGES)
-		bio = bio_alloc(GFP_NOIO, page_count);
+		bio = bio_alloc(sb->s_bdev, page_count, GFP_NOIO);
 	else
 		bio = bio_kmalloc(GFP_NOIO, page_count);
 
 	if (!bio)
 		return -ENOMEM;
 
 	bio_set_dev(bio, sb->s_bdev);
 	bio->bi_opf = READ;
 	bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index e2148f2d5d6b..07c0b735294b 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -4,11 +4,6 @@
  */
 #include "xfs.h"
 
-static inline unsigned int bio_max_vecs(unsigned int count)
-{
-	return min_t(unsigned, howmany(count, PAGE_SIZE), BIO_MAX_PAGES);
-}
-
 int
 xfs_rw_bdev(
 	struct block_device	*bdev,
@@ -26,8 +21,7 @@ xfs_rw_bdev(
 	if (is_vmalloc && op == REQ_OP_WRITE)
 		flush_kernel_vmap_range(data, count);
 
-	bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
-	bio_set_dev(bio, bdev);
+	bio = bio_alloc(bdev, howmany(left, PAGE_SIZE), GFP_KERNEL);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_opf = op | REQ_META | REQ_SYNC;
 
@@ -39,8 +33,8 @@ xfs_rw_bdev(
 		while (bio_add_page(bio, page, len, off) != len) {
 			struct bio *prev = bio;
 
-			bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
-			bio_copy_dev(bio, prev);
+			bio = bio_alloc(bdev, howmany(left, PAGE_SIZE),
+					GFP_KERNEL);
 			bio->bi_iter.bi_sector = bio_end_sector(prev);
 			bio->bi_opf = prev->bi_opf;
 			bio_chain(prev, bio);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f8400bbd6473..52eb0ee42e7b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1507,7 +1507,6 @@ next_chunk:
 	atomic_inc(&bp->b_io_remaining);
 	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
 
-	bio = bio_alloc(GFP_NOIO, nr_pages);
-	bio_set_dev(bio, bp->b_target->bt_bdev);
+	bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, GFP_NOIO);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = xfs_buf_bio_end_io;
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index ab68e27bb322..51cfe2ee7a4b 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -678,11 +678,10 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	if (!nr_pages)
 		return 0;
 
-	bio = bio_alloc(GFP_NOFS, nr_pages);
+	bio = bio_alloc(bdev, nr_pages, GFP_NOFS);
 	if (!bio)
 		return -ENOMEM;
 
-	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = zi->i_zsector;
 	bio->bi_write_hint = iocb->ki_hint;
 	bio->bi_ioprio = iocb->ki_ioprio;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 846b796d8bb4..5ca149d60c41 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -407,7 +407,8 @@ extern void bioset_exit(struct bio_set *);
 extern int biovec_init_pool(mempool_t *pool, int pool_entries);
 extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned int nr_iovecs,
+		gfp_t gfp_mask, struct bio_set *bs);
 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs);
 extern void bio_put(struct bio *);
 
@@ -417,9 +418,10 @@ struct bio *bio_clone_fast(struct block_device *bdev, struct bio *bio,
 
 extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) +static inline struct bio *bio_alloc(struct block_device *bdev, + unsigned int nr_iovecs, gfp_t gfp_mask) { - return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); + return bio_alloc_bioset(bdev, nr_iovecs, gfp_mask, &fs_bio_set); } extern blk_qc_t submit_bio(struct bio *); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index c73f2e295167..e5da27ddf7de 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -274,9 +274,8 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, struct bio *bio; int error = 0; - bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1); + bio = bio_alloc(hib_resume_bdev, 1, GFP_NOIO | __GFP_HIGH); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); - bio_set_dev(bio, hib_resume_bdev); bio_set_op_attrs(bio, op, op_flags); if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { diff --git a/mm/page_io.c b/mm/page_io.c index 92f7941c6d01..9ab0912c90e7 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -342,8 +342,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, return 0; } - bio = bio_alloc(GFP_NOIO, 1); - bio_set_dev(bio, sis->bdev); + bio = bio_alloc(sis->bdev, 1, GFP_NOIO); bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc); bio->bi_end_io = end_write_func; @@ -408,8 +407,7 @@ int swap_readpage(struct page *page, bool synchronous) } ret = 0; - bio = bio_alloc(GFP_KERNEL, 1); - bio_set_dev(bio, sis->bdev); + bio = bio_alloc(sis->bdev, 1, GFP_KERNEL); bio->bi_opf = REQ_OP_READ; bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_read;
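For callers converting to the new calling convention, the change at each
call site is mechanical: drop the explicit bio_set_dev() and hand the
block_device to the allocator instead. A minimal before/after sketch
(my_dev and the error handling here are illustrative placeholders, not
code from this patch):

	/* Before: allocate first, then point the bio at the device. */
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio)
		return -ENOMEM;
	bio_set_dev(bio, my_dev->bdev);

	/*
	 * After: the device is set at allocation time, so a freshly
	 * allocated bio already has a valid bi_bdev.
	 */
	bio = bio_alloc(my_dev->bdev, nr_pages, GFP_NOIO);
	if (!bio)
		return -ENOMEM;

Note that bio_reset() now preserves bi_bdev across the reset, so callers
that recycle a bio (see the raid5-cache hunk above) no longer need a
bio_set_dev() call after resetting it.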