/**
* bio_kmalloc - kmalloc a bio for I/O
- * @gfp_mask: the GFP_* mask given to the slab allocator
+ * @bdev: block device the bio is for, or NULL if the caller will set it later
* @nr_iovecs: number of iovecs to pre-allocate
+ * @gfp_mask: the GFP_* mask given to the slab allocator
*
* Use kmalloc to allocate and initialize a bio.
*
* Returns: Pointer to new bio on success, NULL on failure.
*/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+struct bio *bio_kmalloc(struct block_device *bdev, unsigned int nr_iovecs,
+ gfp_t gfp_mask)
{
struct bio *bio;

/* kmalloc the bio together with its inline bio_vecs */
bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
if (unlikely(!bio))
return NULL;
bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+ if (bdev)
+ bio_set_dev(bio, bdev);
bio->bi_pool = NULL;
return bio;
}
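/*
 * Usage sketch (illustration only, not part of the patch): with the new
 * calling convention the target device is named when the bio is allocated
 * instead of being assigned to bi_bdev afterwards.  The helper below is
 * hypothetical (example_bdev, example_sector and the single-page read are
 * made up), and it assumes kmalloc-backed bios are still released through
 * bio_put(), as the bi_pool = NULL path above implies.
 */
static int bio_kmalloc_example_read(struct block_device *example_bdev,
				    sector_t example_sector, struct page *page)
{
	struct bio *bio;
	int ret;

	/* the device is passed at allocation time now; it may also be NULL */
	bio = bio_kmalloc(example_bdev, 1, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;

	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = example_sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);	/* frees the kmalloc-backed bio */
	return ret;
}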
struct bio_vec bv;
struct bio *bio;
- bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
+ bio = bio_kmalloc(bio_src->bi_bdev, bio_segments(bio_src), GFP_NOIO);
if (!bio)
return NULL;
- bio->bi_bdev = bio_src->bi_bdev;
+
if (bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
bio->bi_opf = bio_src->bi_opf;
nr_pages = BIO_MAX_PAGES;
ret = -ENOMEM;
- bio = bio_kmalloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(NULL, nr_pages, gfp_mask);
if (!bio)
goto out_bmd;
bio->bi_opf |= req_op(rq);
if (!iov_iter_count(iter))
return -EINVAL;
- bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
+ bio = bio_kmalloc(NULL, iov_iter_npages(iter, BIO_MAX_PAGES), gfp_mask);
if (!bio)
return -ENOMEM;
bio->bi_opf |= req_op(rq);
int offset, i;
struct bio *bio;
- bio = bio_kmalloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(NULL, nr_pages, gfp_mask);
if (!bio)
return ERR_PTR(-ENOMEM);
return ERR_PTR(-EINVAL);
nr_pages = end - start;
- bio = bio_kmalloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(NULL, nr_pages, gfp_mask);
if (!bio)
return ERR_PTR(-ENOMEM);
goto no_pkt;
pkt->frames = frames;
- pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
+ pkt->w_bio = bio_kmalloc(NULL, frames, GFP_KERNEL);
if (!pkt->w_bio)
goto no_bio;
bio_list_init(&pkt->orig_bios);
for (i = 0; i < frames; i++) {
- struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
+ struct bio *bio = bio_kmalloc(NULL, 1, GFP_KERNEL);
if (!bio)
goto no_rd_bio;
struct bio_vec bv, cbv;
struct bvec_iter iter, citer = { 0 };
- check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
+ check = bio_kmalloc(bio->bi_bdev, bio_segments(bio), GFP_NOIO);
if (!check)
return;
- bio_set_dev(check, bio->bi_bdev);
check->bi_opf = REQ_OP_READ;
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
vec_size += 2;
- bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
+ bio = bio_kmalloc(b->c->bdev, vec_size,
+ GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
use_dmio(b, rw, sector, n_sectors, offset);
return;
}
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, b->c->bdev);
bio_set_op_attrs(bio, rw, 0);
bio->bi_end_io = bio_complete;
bio->bi_private = b;
* Allocate bios: 1 for reading, n-1 for writing
*/
for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(NULL, RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
r1_bio->bios[j] = bio;
* Allocate bios.
*/
for (j = nalloc ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(NULL, RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
if (!conf->have_replacement)
continue;
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ bio = bio_kmalloc(NULL, RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
r10_bio->devs[j].repl_bio = bio;
* Use bio_kmalloc() following the comment for the bio -> struct request
* conversion in block/blk-core.c:blk_make_request()
*/
- bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
+ bio = bio_kmalloc(NULL, nr_vecs, GFP_KERNEL);
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
/*
* Preallocate a bio that's always going to be used for flushing device
* barriers and matches the device lifespan
+ *
+ * bi_bdev will be set later before the bio is used.
*/
- dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
+ dev->flush_bio = bio_kmalloc(NULL, 0, GFP_KERNEL);
if (!dev->flush_bio) {
kfree(dev);
return ERR_PTR(-ENOMEM);
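/*
 * Reuse sketch (illustration only, not part of the patch): because the flush
 * bio above is allocated without a device, the flush path is expected to
 * point it at the device each time before submission, roughly as follows.
 * The dev->bdev field and the flag combination are assumptions here; the
 * real btrfs flush path also resets and completes the bio around this.
 */
	bio_set_dev(dev->flush_bio, dev->bdev);
	dev->flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(dev->flush_bio);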
if (page_count <= BIO_MAX_PAGES)
bio = bio_alloc(sb->s_bdev, page_count, GFP_NOIO);
else
- bio = bio_kmalloc(GFP_NOIO, page_count);
+ bio = bio_kmalloc(sb->s_bdev, page_count, GFP_NOIO);
if (!bio)
return -ENOMEM;
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned int nr_iovecs,
gfp_t gfp_mask, struct bio_set *bs);
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs);
+struct bio *bio_kmalloc(struct block_device *bdev, unsigned int nr_iovecs,
+ gfp_t gfp_mask);
extern void bio_put(struct bio *);
void __bio_clone_fast(struct bio *, struct bio *);