injects disk IO errors on devices permitted by setting
/sys/block/<device>/make-it-fail or
- /sys/block/<device>/<partition>/make-it-fail. (submit_bio_noacct())
+ /sys/block/<device>/<partition>/make-it-fail. (submit_bio())
- fail_mmc_request
if (!bio)
break;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
* previously allocated bio for IO before attempting to allocate a new one.
* Failure to do so can cause deadlocks under memory pressure.
*
- * Note that when running under submit_bio_noacct() (i.e. any block driver),
- * bios are not submitted until after you return - see the code in
- * submit_bio_noacct() that converts recursion into iteration, to prevent
- * stack overflows.
+ * Note that when running under submit_bio() (i.e. any block driver), bios are
+ * not submitted until after you return - see the code in submit_bio() that
+ * converts recursion into iteration, to prevent stack overflows.
*
- * This would normally mean allocating multiple bios under submit_bio_noacct()
- * would be susceptible to deadlocks, but we have
- * deadlock avoidance code that resubmits any blocked bios from a rescuer
- * thread.
+ * This would normally mean allocating multiple bios under submit_bio() would be
+ * susceptible to deadlocks, but we have deadlock avoidance code that resubmits
+ * any blocked bios from a rescuer thread.
*
* However, we do not guarantee forward progress for allocations from other
- * mempools. Doing multiple allocations from the same mempool under
- * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
- * for per bio allocations.
+ * mempools. Doing multiple allocations from the same mempool under submit_bio()
+ * should be avoided - instead, use bio_set's front_pad for per bio allocations.
*
* Returns: Pointer to new bio on success, NULL on failure.
*/
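To make the front_pad advice above concrete, here is a minimal sketch of the usual pattern, with illustrative names (my_io, my_bio_set) that are not part of this patch: the driver's per-bio state is carved out of the same allocation as the bio itself, so no second mempool allocation is needed while running under submit_bio().

#include <linux/bio.h>
#include <linux/workqueue.h>

struct my_io {
	struct work_struct	work;	/* driver-private per-bio state */
	struct bio		bio;	/* must be the last member */
};

static struct bio_set my_bio_set;

static int my_init(void)
{
	/*
	 * Reserve offsetof(struct my_io, bio) bytes of front_pad; every bio
	 * allocated from this set then sits inside a struct my_io, which
	 * container_of() recovers without any additional allocation.
	 */
	return bioset_init(&my_bio_set, BIO_POOL_SIZE,
			   offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}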
return NULL;
/*
- * submit_bio_noacct() converts recursion to iteration; this means if
- * we're running beneath it, any bios we allocate and submit will not be
+ * submit_bio() converts recursion to iteration; this means if we're
+ * running beneath it, any bios we allocate and submit will not be
* submitted (and thus freed) until after we return.
*
* This exposes us to a potential deadlock if we allocate multiple bios
- * from the same bio_set() while running underneath submit_bio_noacct().
- * If we were to allocate multiple bios (say a stacking block driver
- * that was splitting bios), we would deadlock if we exhausted the
- * mempool's reserve.
+ * from the same bio_set() while running underneath submit_bio(). If we
+ * were to allocate multiple bios (say a stacking block driver that was
+ * splitting bios), we would deadlock if we exhausted the mempool's
+ * reserve.
*
* We solve this, and guarantee forward progress, with a rescuer
* workqueue per bio_set. If we go to allocate and there are bios on
* - We pretend that we have just taken it off a longer list, so we assign
* bio_list to a pointer to the bio_list_on_stack, thus initialising the
* bio_list of new bios to be added. ->submit_bio() may indeed add some more
- * bios through a recursive call to submit_bio_noacct. If it did, we find a
+ * bios through a recursive call to submit_bio. If it did, we find a
* non-NULL value in bio_list and re-enter the loop from the top.
* - In this case we really did just take the bio of the top of the list (no
* pretending) and so remove it from bio_list, and call into ->submit_bio()
void submit_bio_noacct_nocheck(struct bio *bio)
{
- /*
- * We only want one ->submit_bio to be active at a time, else stack
- * usage with stacked devices could be a problem. Use current->bio_list
- * to collect a list of requests submited by a ->submit_bio method while
- * it is active, and then process them after it returned.
- */
- if (current->bio_list)
- bio_list_add(&current->bio_list[0], bio);
- else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
+ if (!bio->bi_bdev->bd_disk->fops->submit_bio)
__submit_bio_noacct_mq(bio);
else
__submit_bio_noacct(bio);
}
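For reference, a simplified sketch of the recursion-to-iteration scheme described in the comment above; the real __submit_bio_noacct() additionally keeps two lists so bios for other devices are processed in a sensible order, and this only applies to bio-based drivers that provide ->submit_bio:

#include <linux/bio.h>
#include <linux/sched.h>

static void submit_flattened(struct bio *bio)
{
	struct bio_list on_stack;

	bio_list_init(&on_stack);
	current->bio_list = &on_stack;	/* recursive submissions land here */

	do {
		/*
		 * ->submit_bio() may generate more bios; with
		 * current->bio_list set they are queued instead of
		 * recursing, bounding stack usage to a single frame.
		 */
		bio->bi_bdev->bd_disk->fops->submit_bio(bio);
	} while ((bio = bio_list_pop(&on_stack)));

	current->bio_list = NULL;
}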
/**
- * submit_bio_noacct - re-submit a bio to the block device layer for I/O
- * @bio: The bio describing the location in memory and on the device.
+ * submit_bio - submit a bio to the block device layer for I/O
+ * @bio: The &struct bio which describes the I/O
+ *
+ * submit_bio() is used to submit I/O requests to block devices. It is passed a
+ * fully set up &struct bio that describes the I/O that needs to be done. The
+ * bio will be sent to the device described by the bi_bdev field.
*
- * This is a version of submit_bio() that shall only be used for I/O that is
- * resubmitted to lower level drivers by stacking block drivers. All file
- * systems and other upper level users of the block layer should use
- * submit_bio() instead.
+ * The success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the ->bi_end_io() callback
+ * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
+ * been called.
*/
-void submit_bio_noacct(struct bio *bio)
+void submit_bio(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct request_queue *q = bdev_get_queue(bdev);
might_sleep();
+ if (blkcg_punt_bio_submit(bio))
+ return;
+
+ /*
+ * We only want one ->submit_bio to be active at a time, else stack
+ * usage with stacked devices could be a problem. Use current->bio_list
+ * to collect a list of requests submitted by a ->submit_bio method while
+ * it is active, and then process them after it returned.
+ */
+ if (current->bio_list) {
+ bio_list_add(&current->bio_list[0], bio);
+ return;
+ }
+
+ if (bio_op(bio) == REQ_OP_READ) {
+ task_io_account_read(bio->bi_iter.bi_size);
+ count_vm_events(PGPGIN, bio_sectors(bio));
+ } else if (bio_op(bio) == REQ_OP_WRITE) {
+ count_vm_events(PGPGOUT, bio_sectors(bio));
+ }
+
plug = blk_mq_plug(q, bio);
if (plug && plug->nowait)
bio->bi_opf |= REQ_NOWAIT;
bio->bi_status = status;
bio_endio(bio);
}
-EXPORT_SYMBOL(submit_bio_noacct);
-
-/**
- * submit_bio - submit a bio to the block device layer for I/O
- * @bio: The &struct bio which describes the I/O
- *
- * submit_bio() is used to submit I/O requests to block devices. It is passed a
- * fully set up &struct bio that describes the I/O that needs to be done. The
- * bio will be send to the device described by the bi_bdev field.
- *
- * The success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the ->bi_end_io() callback
- * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
- * been called.
- */
-void submit_bio(struct bio *bio)
-{
- if (blkcg_punt_bio_submit(bio))
- return;
-
- if (bio_op(bio) == REQ_OP_READ) {
- task_io_account_read(bio->bi_iter.bi_size);
- count_vm_events(PGPGIN, bio_sectors(bio));
- } else if (bio_op(bio) == REQ_OP_WRITE) {
- count_vm_events(PGPGOUT, bio_sectors(bio));
- }
-
- submit_bio_noacct(bio);
-}
EXPORT_SYMBOL(submit_bio);
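As a usage illustration of the interface documented above, a synchronous read might look roughly like the following sketch; my_end_io and my_read_page are hypothetical names, and the bdev-first bio_alloc() calling convention is assumed:

#include <linux/bio.h>
#include <linux/completion.h>

static void my_end_io(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	complete(done);			/* completion arrives asynchronously */
	bio_put(bio);
}

static void my_read_page(struct block_device *bdev, struct page *page,
			 sector_t sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_end_io;
	bio->bi_private = &done;

	submit_bio(bio);
	/* the bio must not be touched until ->bi_end_io() has run */
	wait_for_completion_io(&done);
}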
/**
return false;
}
bio_chain(split_bio, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
*bio_ptr = split_bio;
}
* kernel crypto API. When the crypto API fallback is used for encryption,
* blk-crypto may choose to split the bio into 2 - the first one that will
* continue to be processed and the second one that will be resubmitted via
- * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
- * of the aforementioned "first one", and *bio_ptr will be updated to this
- * bounce bio.
+ * submit_bio. A bounce bio will be allocated to encrypt the contents of the
+ * aforementioned "first one", and *bio_ptr will be updated to this bounce bio.
*
* Caller must ensure bio has bio_crypt_ctx.
*
bio_chain(split, *bio);
trace_block_split(split, (*bio)->bi_iter.bi_sector);
- submit_bio_noacct(*bio);
+ submit_bio(*bio);
*bio = split;
}
}
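The split/chain/resubmit pattern in this hunk recurs throughout the rest of the patch; a generic sketch of it, with hypothetical names (limit_bio), is:

#include <linux/bio.h>

static struct bio *limit_bio(struct bio *bio, unsigned int max_sectors,
			     struct bio_set *bs)
{
	struct bio *split;

	if (bio_sectors(bio) <= max_sectors)
		return bio;

	/* carve off the part we can handle now ... */
	split = bio_split(bio, max_sectors, GFP_NOIO, bs);
	/* ... and make the original (the remainder) wait for it */
	bio_chain(split, bio);
	/*
	 * Requeue the remainder: under submit_bio() this only appends to
	 * current->bio_list, so it is processed after we return.
	 */
	submit_bio(bio);
	return split;
}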
if (sectors < bio_sectors(*bio_orig)) {
bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
bio_chain(bio, *bio_orig);
- submit_bio_noacct(*bio_orig);
+ submit_bio(*bio_orig);
*bio_orig = bio;
}
bio = bounce_clone_bio(*bio_orig);
* workqueues instead.
*/
- /* We are not just doing submit_bio_noacct(),
+ /* We are not just doing submit_bio(),
* as we want to keep the start_time information. */
inc_ap_bio(device);
__drbd_make_request(device, bio);
if (drbd_insert_fault(device, fault_type))
bio_io_error(bio);
else
- submit_bio_noacct(bio);
+ submit_bio(bio);
} while (bios);
return 0;
}
else if (bio_op(bio) == REQ_OP_DISCARD)
drbd_process_discard_or_zeroes_req(req, EE_TRIM);
else
- submit_bio_noacct(bio);
+ submit_bio(bio);
put_ldev(device);
} else
bio_io_error(bio);
&drbd_io_bio_set);
req->private_bio->bi_private = req;
req->private_bio->bi_end_io = drbd_request_endio;
- submit_bio_noacct(req->private_bio);
+ submit_bio(req->private_bio);
return 0;
}
}
atomic_inc(&pd->cdrw.pending_bios);
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
bio_endio(bio);
return;
}
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
/*
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
+ * If IO is necessary and running under submit_bio, returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
!blk_queue_discard(bdev_get_queue(dc->bdev)))
bio->bi_end_io(bio);
else
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
static void quit_max_writeback_rate(struct cache_set *c,
if (!bio->bi_iter.bi_size) {
/*
- * can't call bch_journal_meta from under
- * submit_bio_noacct
+ * can't call bch_journal_meta from under submit_bio
*/
continue_at_nobarrier(&s->cl,
cached_dev_nodata,
if (!bio->bi_iter.bi_size) {
/*
- * can't call bch_journal_meta from under submit_bio_noacct
+ * can't call bch_journal_meta from under submit_bio
*/
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
blk_start_plug(&plug);
while ((bio = bio_list_pop(bios)))
- submit_bio_noacct(bio);
+ submit_bio(bio);
blk_finish_plug(&plug);
}
static void issue_bio(struct clone *clone, struct bio *bio)
{
if (!bio_triggers_commit(clone, bio)) {
- submit_bio_noacct(bio);
+ submit_bio(bio);
return;
}
bio_region_range(clone, bio, &rs, &nr_regions);
trim_bio(bio, region_to_sector(clone, rs),
nr_regions << clone->region_shift);
- submit_bio_noacct(bio);
+ submit_bio(bio);
} else
bio_endio(bio);
}
bio->bi_private = hd;
atomic_inc(&hd->clone->hydrations_in_flight);
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
/*
*/
bio_endio(bio);
} else {
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
}
*/
if (commit_needed)
set_bit(get_block(era, bio), ws->bits);
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
blk_finish_plug(&plug);
}
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
- submit_bio_noacct(bio);
-
+ submit_bio(bio);
return;
}
- submit_bio_noacct(bio);
+ submit_bio(bio);
if (need_sync_io) {
wait_for_completion_io(&read_comp);
bio_endio(bio);
break;
case DM_MAPIO_REMAPPED:
- submit_bio_noacct(bio);
+ submit_bio(bio);
break;
case DM_MAPIO_SUBMITTED:
break;
wakeup_mirrord(ms);
} else {
map_bio(get_default_mirror(ms), bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
}
/*
* Issue the synchronous I/O from a different thread
- * to avoid submit_bio_noacct recursion.
+ * to avoid submit_bio recursion.
*/
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
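A condensed sketch of the "hand the I/O to a worker and wait" idea used here, with hypothetical names (my_work, do_sync_io, issue_sync) and submit_bio_wait() standing in for the driver's actual I/O routine:

#include <linux/bio.h>
#include <linux/workqueue.h>

struct my_work {
	struct work_struct	work;
	struct bio		*bio;
	int			ret;
};

static void do_sync_io(struct work_struct *w)
{
	struct my_work *mw = container_of(w, struct my_work, work);

	/* worker context: current->bio_list is NULL, so no recursion issue */
	mw->ret = submit_bio_wait(mw->bio);
}

static int issue_sync(struct workqueue_struct *wq, struct bio *bio)
{
	struct my_work mw = { .bio = bio };

	INIT_WORK_ONSTACK(&mw.work, do_sync_io);
	queue_work(wq, &mw.work);
	flush_work(&mw.work);		/* wait for do_sync_io() to finish */
	destroy_work_on_stack(&mw.work);
	return mw.ret;
}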
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = n;
}
}
bio->bi_next = NULL;
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = n;
}
}
bio->bi_end_io = full_bio_end_io;
bio->bi_private = callback_data;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
static struct dm_snap_pending_exception *
verity_submit_prefetch(v, io);
- submit_bio_noacct(bio);
+ submit_bio(bio);
return DM_MAPIO_SUBMITTED;
}
bio_end_sector(bio));
wc_unlock(wc);
bio_set_dev(bio, wc->dev->bdev);
- submit_bio_noacct(bio);
+ submit_bio(bio);
} else {
writecache_flush(wc);
wc_unlock(wc);
bio_advance(bio, clone->bi_iter.bi_size);
refcount_inc(&bioctx->ref);
- submit_bio_noacct(clone);
+ submit_bio(clone);
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
dev_t dev, sector_t old_sector)
{
trace_block_bio_remap(clone, dev, old_sector);
- submit_bio_noacct(clone);
+ submit_bio(clone);
}
/*
goto out;
/*
- * Remainder must be passed to submit_bio_noacct() so it gets handled
+ * Remainder must be passed to submit_bio() so it gets handled
* *after* bios already submitted have been completely processed.
* We take a clone of the original to store in ci.io->orig_bio to be
* used by dm_end_io_acct() and for dm_io_complete() to use for
GFP_NOIO, &md->queue->bio_split);
bio_chain(orig_bio, bio);
trace_block_split(orig_bio, bio->bi_iter.bi_sector);
- submit_bio_noacct(bio);
+ submit_bio(bio);
out:
if (!orig_bio)
orig_bio = bio;
/*
* Restore .bi_private before possibly completing dm_io.
*
- * bio_poll() is only possible once @bio has been completely
- * submitted via submit_bio_noacct()'s depth-first submission.
- * So there is no dm_queue_poll_io() race associated with
- * clearing REQ_DM_POLL_LIST here.
+ * bio_poll() is only possible once @bio has been completely submitted
+ * via submit_bio()'s depth-first submission. So there is no
+ * dm_queue_poll_io() race associated with clearing REQ_DM_POLL_LIST
+ * here.
*/
bio->bi_opf &= ~REQ_DM_POLL_LIST;
bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
if (!bio)
break;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
if (bio_data_dir(bio) == WRITE) {
/* write request */
if (atomic_read(&conf->counters[WriteAll])) {
- /* special case - don't decrement, don't submit_bio_noacct,
+ /* special case - don't decrement, don't submit_bio,
* just fail immediately
*/
bio_io_error(bio);
} else
bio_set_dev(bio, conf->rdev->bdev);
- submit_bio_noacct(bio);
+ submit_bio(bio);
return true;
}
struct bio *split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, &mddev->bio_set);
bio_chain(split, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = split;
}
trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
bio_sector);
mddev_check_write_zeroes(mddev, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
return true;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
mddev_check_write_zeroes(mddev, &mp_bh->bio);
- submit_bio_noacct(&mp_bh->bio);
+ submit_bio(&mp_bh->bio);
return true;
}
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
spin_unlock_irqrestore(&conf->device_lock, flags);
trace_block_bio_remap(discard_bio,
disk_devt(mddev->gendisk),
bio->bi_iter.bi_sector);
- submit_bio_noacct(discard_bio);
+ submit_bio(discard_bio);
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
&mddev->bio_set);
bio_chain(split, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = split;
end = zone->zone_end;
} else
struct bio *split = bio_split(bio, sectors, GFP_NOIO,
&mddev->bio_set);
bio_chain(split, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = split;
}
trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
bio_sector);
mddev_check_write_zeroes(mddev, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
return true;
}
/* Just ignore it */
bio_endio(bio);
else
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = next;
cond_resched();
}
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
bio_chain(split, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = split;
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
r1_bio->sector);
- submit_bio_noacct(read_bio);
+ submit_bio(read_bio);
}
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = split;
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
- submit_bio_noacct(wbio);
+ submit_bio(wbio);
}
put_sync_write_buf(r1_bio, 1);
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
} else {
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
return nr_sectors;
}
/* Just ignore it */
bio_endio(bio);
else
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = next;
}
blk_finish_plug(&plug);
/* Just ignore it */
bio_endio(bio);
else
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = next;
}
kfree(plug);
gfp, &conf->bio_split);
bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ submit_bio(bio);
wait_barrier(conf, false);
bio = split;
r10_bio->master_bio = bio;
if (mddev->gendisk)
trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
- submit_bio_noacct(read_bio);
+ submit_bio(read_bio);
return;
}
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ submit_bio(bio);
wait_barrier(conf, false);
bio = split;
r10_bio->master_bio = bio;
bio_chain(split, bio);
allow_barrier(conf);
/* Resend the first split part */
- submit_bio_noacct(split);
+ submit_bio(split);
wait_barrier(conf, false);
}
div_u64_rem(bio_end, stripe_size, &remainder);
bio_chain(split, bio);
allow_barrier(conf);
/* Resend the second split part */
- submit_bio_noacct(bio);
+ submit_bio(bio);
bio = split;
wait_barrier(conf, false);
}
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- submit_bio_noacct(tbio);
+ submit_bio(tbio);
}
/* Now write out to any replacement devices
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(tbio));
- submit_bio_noacct(tbio);
+ submit_bio(tbio);
}
done:
d = r10_bio->devs[1].devnum;
wbio = r10_bio->devs[1].bio;
wbio2 = r10_bio->devs[1].repl_bio;
- /* Need to test wbio2->bi_end_io before we call
- * submit_bio_noacct as if the former is NULL,
- * the latter is free to free wbio2.
+ /*
+ * Need to test wbio2->bi_end_io before we call submit_bio as if the
+ * former is NULL, the latter is free to free wbio2.
*/
if (wbio2 && !wbio2->bi_end_io)
wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
- submit_bio_noacct(wbio);
+ submit_bio(wbio);
}
if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
- submit_bio_noacct(wbio2);
+ submit_bio(wbio2);
}
}
* a number of r10_bio structures, one for each out-of-sync device.
* As we setup these structures, we collect all bio's together into a list
* which we then process collectively to add pages, and then process again
- * to pass to submit_bio_noacct.
+ * to pass to submit_bio.
*
* The r10_bio structures are linked using a borrowed master_bio pointer.
* This link is counted in ->remaining. When the r10_bio that points to NULL
if (bio->bi_end_io == end_sync_read) {
md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
- submit_bio_noacct(read_bio);
+ submit_bio(read_bio);
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
- submit_bio_noacct(b);
+ submit_bio(b);
}
end_reshape_request(r10_bio);
}
struct bio *bio;
while ((bio = bio_list_pop(tmp)))
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
static int cmp_stripe(void *priv, const struct list_head *a,
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, bi);
else
- submit_bio_noacct(bi);
+ submit_bio(bi);
}
if (rrdev) {
if (s->syncing || s->expanding || s->expanded
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, rbi);
else
- submit_bio_noacct(rbi);
+ submit_bio(rbi);
}
if (!rdev && !rrdev) {
if (op_is_write(op))
if (mddev->gendisk)
trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector);
- submit_bio_noacct(align_bio);
+ submit_bio(align_bio);
return 1;
out_rcu_unlock:
struct r5conf *conf = mddev->private;
split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
bio_chain(split, raid_bio);
- submit_bio_noacct(raid_bio);
+ submit_bio(raid_bio);
raid_bio = split;
}
bio->bi_opf |= REQ_NVME_MPATH;
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
- submit_bio_noacct(bio);
+ submit_bio(bio);
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
next = bio->bi_next;
bio->bi_next = NULL;
- submit_bio_noacct(bio);
+ submit_bio(bio);
}
}
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-void submit_bio_noacct(struct bio *bio);
extern int blk_lld_busy(struct request_queue *q);
extern void blk_queue_split(struct bio **);