block: remove submit_bio_noacct
author	Christoph Hellwig <hch@lst.de>
	Wed, 30 Mar 2022 08:14:58 +0000 (10:14 +0200)
committer	Christoph Hellwig <hch@lst.de>
	Thu, 31 Mar 2022 12:05:48 +0000 (14:05 +0200)
The decision of when exactly to use submit_bio_noacct has been
non-trivial, and a bunch of useful submit_bio wrappers like
submit_bio_wait lead to inconsistent usage.

Try to make a little more sense of this by not doing accounting for any
I/O that is submitted from under another submit_bio instance, and by
moving the recursion avoidance to the beginning of submit_bio, thus
removing the need for a separate submit_bio_noacct interface.  This
delays a few of the checks done in submit_bio for the recursive case,
but that just brings us back to how we've done it in the past.
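
In condensed form, the resulting entry point looks roughly like this
(abridged from the block/blk-core.c hunk below; the elided tail is the
pre-existing checks and the call into submit_bio_noacct_nocheck):

    void submit_bio(struct bio *bio)
    {
            might_sleep();

            if (blkcg_punt_bio_submit(bio))
                    return;

            /*
             * Recursion avoidance: bios submitted from under an active
             * ->submit_bio are queued on current->bio_list, processed
             * iteratively later, and skip the accounting below.
             */
            if (current->bio_list) {
                    bio_list_add(&current->bio_list[0], bio);
                    return;
            }

            /* I/O accounting now happens for top-level submissions only. */
            if (bio_op(bio) == REQ_OP_READ) {
                    task_io_account_read(bio->bi_iter.bi_size);
                    count_vm_events(PGPGIN, bio_sectors(bio));
            } else if (bio_op(bio) == REQ_OP_WRITE) {
                    count_vm_events(PGPGOUT, bio_sectors(bio));
            }

            /* ... existing checks, then submit_bio_noacct_nocheck() ... */
    }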

Signed-off-by: Christoph Hellwig <hch@lst.de>
36 files changed:
Documentation/fault-injection/fault-injection.rst
block/bio.c
block/blk-core.c
block/blk-crypto-fallback.c
block/blk-crypto.c
block/blk-merge.c
block/bounce.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/pktcdvd.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/request.c
drivers/md/dm-clone-target.c
drivers/md/dm-era-target.c
drivers/md/dm-integrity.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-verity-target.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/md-faulty.c
drivers/md/md-linear.c
drivers/md/md-multipath.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/nvme/host/multipath.c
include/linux/blkdev.h

index 4a25c5eb6f07266a21afe5124628e0a763e1e7b7..0aa70e5b87d57bf2e90ef0b250fc741017efc5f3 100644 (file)
@@ -32,7 +32,7 @@ Available fault injection capabilities
 
   injects disk IO errors on devices permitted by setting
   /sys/block/<device>/make-it-fail or
-  /sys/block/<device>/<partition>/make-it-fail. (submit_bio_noacct())
+  /sys/block/<device>/<partition>/make-it-fail. (submit_bio())
 
 - fail_mmc_request
 
index a9d2a8544cd8ccccec9639b38dcb0c6014433e1b..8f574cb92fb3a789f7267e8f904215126762377f 100644 (file)
@@ -378,7 +378,7 @@ static void bio_alloc_rescue(struct work_struct *work)
                if (!bio)
                        break;
 
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        }
 }
 
@@ -436,20 +436,17 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  * previously allocated bio for IO before attempting to allocate a new one.
  * Failure to do so can cause deadlocks under memory pressure.
  *
- * Note that when running under submit_bio_noacct() (i.e. any block driver),
- * bios are not submitted until after you return - see the code in
- * submit_bio_noacct() that converts recursion into iteration, to prevent
- * stack overflows.
+ * Note that when running under submit_bio() (i.e. any block driver), bios are
+ * not submitted until after you return - see the code in submit_bio() that
+ * converts recursion into iteration, to prevent stack overflows.
  *
- * This would normally mean allocating multiple bios under submit_bio_noacct()
- * would be susceptible to deadlocks, but we have
- * deadlock avoidance code that resubmits any blocked bios from a rescuer
- * thread.
+ * This would normally mean allocating multiple bios under submit_bio() would be
+ * susceptible to deadlocks, but we have deadlock avoidance code that resubmits
+ * any blocked bios from a rescuer thread.
  *
  * However, we do not guarantee forward progress for allocations from other
- * mempools. Doing multiple allocations from the same mempool under
- * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
- * for per bio allocations.
+ * mempools. Doing multiple allocations from the same mempool under submit_bio()
+ * should be avoided - instead, use bio_set's front_pad for per bio allocations.
  *
  * Returns: Pointer to new bio on success, NULL on failure.
  */
@@ -466,15 +463,15 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                return NULL;
 
        /*
-        * submit_bio_noacct() converts recursion to iteration; this means if
-        * we're running beneath it, any bios we allocate and submit will not be
+        * submit_bio() converts recursion to iteration; this means if we're
+        * running beneath it, any bios we allocate and submit will not be
         * submitted (and thus freed) until after we return.
         *
         * This exposes us to a potential deadlock if we allocate multiple bios
-        * from the same bio_set() while running underneath submit_bio_noacct().
-        * If we were to allocate multiple bios (say a stacking block driver
-        * that was splitting bios), we would deadlock if we exhausted the
-        * mempool's reserve.
+        * from the same bio_set() while running underneath submit_bio().  If we
+        * were to allocate multiple bios (say a stacking block driver that was
+        * splitting bios), we would deadlock if we exhausted the mempool's
+        * reserve.
         *
         * We solve this, and guarantee forward progress, with a rescuer
         * workqueue per bio_set. If we go to allocate and there are bios on
index a6bc915ac9155a560172db2326f65a5d9f084cf8..455186f782aa189e712ab15b8bb1c5cb9f777340 100644 (file)
@@ -678,7 +678,7 @@ static void __submit_bio(struct bio *bio)
  *  - We pretend that we have just taken it off a longer list, so we assign
  *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
  *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
- *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
+ *    bios through a recursive call to submit_bio.  If it did, we find a
  *    non-NULL value in bio_list and re-enter the loop from the top.
  *  - In this case we really did just take the bio of the top of the list (no
  *    pretending) and so remove it from bio_list, and call into ->submit_bio()
@@ -747,30 +747,26 @@ static void __submit_bio_noacct_mq(struct bio *bio)
 
 void submit_bio_noacct_nocheck(struct bio *bio)
 {
-       /*
-        * We only want one ->submit_bio to be active at a time, else stack
-        * usage with stacked devices could be a problem.  Use current->bio_list
-        * to collect a list of requests submited by a ->submit_bio method while
-        * it is active, and then process them after it returned.
-        */
-       if (current->bio_list)
-               bio_list_add(&current->bio_list[0], bio);
-       else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
+       if (!bio->bi_bdev->bd_disk->fops->submit_bio)
                __submit_bio_noacct_mq(bio);
        else
                __submit_bio_noacct(bio);
 }
 
 /**
- * submit_bio_noacct - re-submit a bio to the block device layer for I/O
- * @bio:  The bio describing the location in memory and on the device.
+ * submit_bio - submit a bio to the block device layer for I/O
+ * @bio: The &struct bio which describes the I/O
+ *
+ * submit_bio() is used to submit I/O requests to block devices.  It is passed a
+ * fully set up &struct bio that describes the I/O that needs to be done.  The
+ * bio will be sent to the device described by the bi_bdev field.
  *
- * This is a version of submit_bio() that shall only be used for I/O that is
- * resubmitted to lower level drivers by stacking block drivers.  All file
- * systems and other upper level users of the block layer should use
- * submit_bio() instead.
+ * The success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the ->bi_end_io() callback
+ * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
+ * been called.
  */
-void submit_bio_noacct(struct bio *bio)
+void submit_bio(struct bio *bio)
 {
        struct block_device *bdev = bio->bi_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
@@ -779,6 +775,27 @@ void submit_bio_noacct(struct bio *bio)
 
        might_sleep();
 
+       if (blkcg_punt_bio_submit(bio))
+               return;
+
+       /*
+        * We only want one ->submit_bio to be active at a time, else stack
+        * usage with stacked devices could be a problem.  Use current->bio_list
+        * to collect a list of requests submitted by a ->submit_bio method while
+        * it is active, and then process them after it returned.
+        */
+       if (current->bio_list) {
+               bio_list_add(&current->bio_list[0], bio);
+               return;
+       }
+
+       if (bio_op(bio) == REQ_OP_READ) {
+               task_io_account_read(bio->bi_iter.bi_size);
+               count_vm_events(PGPGIN, bio_sectors(bio));
+       } else if (bio_op(bio) == REQ_OP_WRITE) {
+               count_vm_events(PGPGOUT, bio_sectors(bio));
+       }
+
        plug = blk_mq_plug(q, bio);
        if (plug && plug->nowait)
                bio->bi_opf |= REQ_NOWAIT;
@@ -872,35 +889,6 @@ end_io:
        bio->bi_status = status;
        bio_endio(bio);
 }
-EXPORT_SYMBOL(submit_bio_noacct);
-
-/**
- * submit_bio - submit a bio to the block device layer for I/O
- * @bio: The &struct bio which describes the I/O
- *
- * submit_bio() is used to submit I/O requests to block devices.  It is passed a
- * fully set up &struct bio that describes the I/O that needs to be done.  The
- * bio will be send to the device described by the bi_bdev field.
- *
- * The success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the ->bi_end_io() callback
- * in @bio.  The bio must NOT be touched by thecaller until ->bi_end_io() has
- * been called.
- */
-void submit_bio(struct bio *bio)
-{
-       if (blkcg_punt_bio_submit(bio))
-               return;
-
-       if (bio_op(bio) == REQ_OP_READ) {
-               task_io_account_read(bio->bi_iter.bi_size);
-               count_vm_events(PGPGIN, bio_sectors(bio));
-       } else if (bio_op(bio) == REQ_OP_WRITE) {
-               count_vm_events(PGPGOUT, bio_sectors(bio));
-       }
-
-       submit_bio_noacct(bio);
-}
 EXPORT_SYMBOL(submit_bio);
 
 /**
index 7c854584b52b506fece323d6275948a10f9a7849..f79b94cb059236f669c94079c1519c7f3d0f6ff1 100644 (file)
@@ -229,7 +229,7 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
                        return false;
                }
                bio_chain(split_bio, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                *bio_ptr = split_bio;
        }
 
index a496aaef85ba456c756965062764b9344ff3ce5e..64f17de133f8ad383bf9aa3508ef94e78a515f7c 100644 (file)
@@ -253,9 +253,8 @@ void __blk_crypto_free_request(struct request *rq)
  * kernel crypto API. When the crypto API fallback is used for encryption,
  * blk-crypto may choose to split the bio into 2 - the first one that will
  * continue to be processed and the second one that will be resubmitted via
- * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
- * of the aforementioned "first one", and *bio_ptr will be updated to this
- * bounce bio.
+ * submit_bio. A bounce bio will be allocated to encrypt the contents of the
+ * aforementioned "first one", and *bio_ptr will be updated to this bounce bio.
  *
  * Caller must ensure bio has bio_crypt_ctx.
  *
index 7771dacc99cb7d30328b277ac6866c8b2bf39f44..412222e9b2eb2ab656861f78ef0e24df9a487c47 100644 (file)
@@ -347,7 +347,7 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
 
                bio_chain(split, *bio);
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
-               submit_bio_noacct(*bio);
+               submit_bio(*bio);
                *bio = split;
        }
 }
index 467be46d0e65620c08067579ff4fd30f1ce666a6..90212daf417f321cb42145566b436584ea39ba3d 100644 (file)
@@ -222,7 +222,7 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
        if (sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
                bio_chain(bio, *bio_orig);
-               submit_bio_noacct(*bio_orig);
+               submit_bio(*bio_orig);
                *bio_orig = bio;
        }
        bio = bounce_clone_bio(*bio_orig);
index 96881d5babd9e1bb356db1890fb9af1f55e2cb8f..388ed200d253a6d8fdd91eed8659be8fb74eaa08 100644 (file)
@@ -2279,7 +2279,7 @@ static void do_retry(struct work_struct *ws)
                 * workqueues instead.
                 */
 
-               /* We are not just doing submit_bio_noacct(),
+               /* We are not just doing submit_bio(),
                 * as we want to keep the start_time information. */
                inc_ap_bio(device);
                __drbd_make_request(device, bio);
index 3724c0a81f4176cd0c142ae221a62a5e4b1c03fe..59f1a3af8576f54ed1d44818077e5d8d3362eda6 100644 (file)
@@ -1703,7 +1703,7 @@ next_bio:
                if (drbd_insert_fault(device, fault_type))
                        bio_io_error(bio);
                else
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
        } while (bios);
        return 0;
 }
index c04394518b07f53b98722e32ede77aeb6e965f6d..16ee19d492e27aae00c3236088ee42199a31c027 100644 (file)
@@ -1164,7 +1164,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
                else if (bio_op(bio) == REQ_OP_DISCARD)
                        drbd_process_discard_or_zeroes_req(req, EE_TRIM);
                else
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                put_ldev(device);
        } else
                bio_io_error(bio);
index 0f9956f4e9c4233cc82f3e87affba0e2b46e0d9a..74d8bb048a3f0297ce2502f5451b5a6555ae05bd 100644 (file)
@@ -1523,7 +1523,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
                                          &drbd_io_bio_set);
        req->private_bio->bi_private = req;
        req->private_bio->bi_end_io = drbd_request_endio;
-       submit_bio_noacct(req->private_bio);
+       submit_bio(req->private_bio);
 
        return 0;
 }
index 86c8794ede415c0476f6eaddf32c63c2c946fb0e..eff4c7989f496b5bf6d4466c3ac2d8d0d3cb3c5e 100644 (file)
@@ -908,7 +908,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                }
 
                atomic_inc(&pd->cdrw.pending_bios);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        }
 }
 
index 9ed9c955add7b9c957f2f1be5628ca633bfa7a08..1ee91e58f7c218b82d33bd517ea8a82cb57a2315 100644 (file)
@@ -933,7 +933,7 @@ static inline void closure_bio_submit(struct cache_set *c,
                bio_endio(bio);
                return;
        }
-       submit_bio_noacct(bio);
+       submit_bio(bio);
 }
 
 /*
index ad9f16689419d31bc614e2546d9d5f18a82e72e3..4dd780b94c8da6b7787f7cfa24ea9f44809c2501 100644 (file)
@@ -970,7 +970,7 @@ err:
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
+ * If IO is necessary and running under submit_bio, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.
index fdd0194f84dd089572f34cdcad1401c8748db9b0..43788a532e18c8b00f1ea5314d3569971c09559f 100644 (file)
@@ -1118,7 +1118,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
            !blk_queue_discard(bdev_get_queue(dc->bdev)))
                bio->bi_end_io(bio);
        else
-               submit_bio_noacct(bio);
+               submit_bio(bio);
 }
 
 static void quit_max_writeback_rate(struct cache_set *c,
@@ -1203,8 +1203,7 @@ void cached_dev_submit_bio(struct bio *bio)
 
                if (!bio->bi_iter.bi_size) {
                        /*
-                        * can't call bch_journal_meta from under
-                        * submit_bio_noacct
+                        * can't call bch_journal_meta from under submit_bio
                         */
                        continue_at_nobarrier(&s->cl,
                                              cached_dev_nodata,
@@ -1289,7 +1288,7 @@ void flash_dev_submit_bio(struct bio *bio)
 
        if (!bio->bi_iter.bi_size) {
                /*
-                * can't call bch_journal_meta from under submit_bio_noacct
+                * can't call bch_journal_meta from under submit_bio
                 */
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
index 128316a73d0163a8513d86e30bcfcc306c4dc4af..690ce2fb7866a0c4c2d626a8c2f5803dcb020db0 100644 (file)
@@ -323,7 +323,7 @@ static void submit_bios(struct bio_list *bios)
        blk_start_plug(&plug);
 
        while ((bio = bio_list_pop(bios)))
-               submit_bio_noacct(bio);
+               submit_bio(bio);
 
        blk_finish_plug(&plug);
 }
@@ -339,7 +339,7 @@ static void submit_bios(struct bio_list *bios)
 static void issue_bio(struct clone *clone, struct bio *bio)
 {
        if (!bio_triggers_commit(clone, bio)) {
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                return;
        }
 
@@ -466,7 +466,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
                bio_region_range(clone, bio, &rs, &nr_regions);
                trim_bio(bio, region_to_sector(clone, rs),
                         nr_regions << clone->region_shift);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        } else
                bio_endio(bio);
 }
@@ -858,7 +858,7 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
        bio->bi_private = hd;
 
        atomic_inc(&hd->clone->hydrations_in_flight);
-       submit_bio_noacct(bio);
+       submit_bio(bio);
 }
 
 /*
@@ -1270,7 +1270,7 @@ static void process_deferred_flush_bios(struct clone *clone)
                         */
                        bio_endio(bio);
                } else {
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                }
        }
 }
index 1f6bf152b3c74d6573fdc3477f63d15d0874b32e..e3ed40272bb611a12797a6a20291414c85bbefc4 100644 (file)
@@ -1293,7 +1293,7 @@ static void process_deferred_bios(struct era *era)
                         */
                        if (commit_needed)
                                set_bit(get_block(era, bio), ws->bits);
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                }
                blk_finish_plug(&plug);
        }
index c58a5111cb575fe8dc9f24dd900f049bafe99ca0..8bd0f555d1c6846120cb82937d710da2446b1c1a 100644 (file)
@@ -2282,12 +2282,11 @@ offload_to_thread:
                dio->in_flight = (atomic_t)ATOMIC_INIT(1);
                dio->completion = NULL;
 
-               submit_bio_noacct(bio);
-
+               submit_bio(bio);
                return;
        }
 
-       submit_bio_noacct(bio);
+       submit_bio(bio);
 
        if (need_sync_io) {
                wait_for_completion_io(&read_comp);
index 6ed9d2731254889723a0b7a3fc7a5868575a54b6..85d6e317985a70e115799ea0895719687bd369cd 100644 (file)
@@ -714,7 +714,7 @@ static void process_queued_bios(struct work_struct *work)
                        bio_endio(bio);
                        break;
                case DM_MAPIO_REMAPPED:
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                        break;
                case DM_MAPIO_SUBMITTED:
                        break;
index 8811d484fdd14c77cd97fb567104b3fa9d83f496..a5255ed1da7bcd6be9001a3aed6f69c0fa6cddb5 100644 (file)
@@ -779,7 +779,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
                        wakeup_mirrord(ms);
                } else {
                        map_bio(get_default_mirror(ms), bio);
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                }
        }
 }
index 3bb5cff5d6fc38acdeab6f27301119816a4cdc50..f820ea4bdcf9a93236eab169ec138d8c33e3a369 100644 (file)
@@ -252,7 +252,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
 
        /*
         * Issue the synchronous I/O from a different thread
-        * to avoid submit_bio_noacct recursion.
+        * to avoid submit_bio recursion.
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
index 0d336b5ec5714eb1d182b889cd13467c84f3f5a2..3970c0174d54e4ff5eff138b29369a24aa3d82e2 100644 (file)
@@ -1575,7 +1575,7 @@ static void flush_bios(struct bio *bio)
        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = n;
        }
 }
@@ -1595,7 +1595,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio, false);
                if (r == DM_MAPIO_REMAPPED)
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                bio = n;
        }
 }
@@ -1836,7 +1836,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;
 
-       submit_bio_noacct(bio);
+       submit_bio(bio);
 }
 
 static struct dm_snap_pending_exception *
index 80133aae0db372f0629f70cdb8563125b46dadd1..75031b06299ae5b7a337241aa80a7befbffbe716 100644 (file)
@@ -705,7 +705,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 
        verity_submit_prefetch(v, io);
 
-       submit_bio_noacct(bio);
+       submit_bio(bio);
 
        return DM_MAPIO_SUBMITTED;
 }
index 5630b470ba429be4c6c046612ebeb921da46258d..2b9ade48f222ffe19509de8866d0b0243c1de353 100644 (file)
@@ -1300,7 +1300,7 @@ static int writecache_flush_thread(void *data)
                                           bio_end_sector(bio));
                        wc_unlock(wc);
                        bio_set_dev(bio, wc->dev->bdev);
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                } else {
                        writecache_flush(wc);
                        wc_unlock(wc);
index cac295cc8840efe5d78d60438f3276359c3b2c1d..a470664fc790efd1e130b9af0aa0f8bc479f8018 100644 (file)
@@ -139,7 +139,7 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
        bio_advance(bio, clone->bi_iter.bi_size);
 
        refcount_inc(&bioctx->ref);
-       submit_bio_noacct(clone);
+       submit_bio(clone);
 
        if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
                zone->wp_block += nr_blocks;
index ad2e0bbeb559a584434a1b90da504ab0b7708c71..76dfd870a32b0af89ae5750611f1b6c2046626ca 100644 (file)
@@ -1190,7 +1190,7 @@ static inline void __dm_submit_bio_remap(struct bio *clone,
                                         dev_t dev, sector_t old_sector)
 {
        trace_block_bio_remap(clone, dev, old_sector);
-       submit_bio_noacct(clone);
+       submit_bio(clone);
 }
 
 /*
@@ -1573,7 +1573,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
                goto out;
 
        /*
-        * Remainder must be passed to submit_bio_noacct() so it gets handled
+        * Remainder must be passed to submit_bio() so it gets handled
         * *after* bios already submitted have been completely processed.
         * We take a clone of the original to store in ci.io->orig_bio to be
         * used by dm_end_io_acct() and for dm_io_complete() to use for
@@ -1583,7 +1583,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
                             GFP_NOIO, &md->queue->bio_split);
        bio_chain(orig_bio, bio);
        trace_block_split(orig_bio, bio->bi_iter.bi_sector);
-       submit_bio_noacct(bio);
+       submit_bio(bio);
 out:
        if (!orig_bio)
                orig_bio = bio;
@@ -1668,10 +1668,10 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
        /*
         * Restore .bi_private before possibly completing dm_io.
         *
-        * bio_poll() is only possible once @bio has been completely
-        * submitted via submit_bio_noacct()'s depth-first submission.
-        * So there is no dm_queue_poll_io() race associated with
-        * clearing REQ_DM_POLL_LIST here.
+        * bio_poll() is only possible once @bio has been completely submitted
+        * via submit_bio()'s depth-first submission.  So there is no
+        * dm_queue_poll_io() race associated with clearing REQ_DM_POLL_LIST
+        * here.
         */
        bio->bi_opf &= ~REQ_DM_POLL_LIST;
        bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
@@ -2380,7 +2380,7 @@ static void dm_wq_work(struct work_struct *work)
                if (!bio)
                        break;
 
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        }
 }
 
index 50ad818978a4333bc225cd3626c3265ded5167d0..c96f1fb95f9d39cb88d1b2f303bbcd081e5ea6f9 100644 (file)
@@ -169,7 +169,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
        if (bio_data_dir(bio) == WRITE) {
                /* write request */
                if (atomic_read(&conf->counters[WriteAll])) {
-                       /* special case - don't decrement, don't submit_bio_noacct,
+                       /* special case - don't decrement, don't submit_bio,
                         * just fail immediately
                         */
                        bio_io_error(bio);
@@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
        } else
                bio_set_dev(bio, conf->rdev->bdev);
 
-       submit_bio_noacct(bio);
+       submit_bio(bio);
        return true;
 }
 
index 0f55b079371b136abf1fb5c8b338c91f4e7eba6f..e9bbf2d483669d3e863deb24662dc263a9a1b02f 100644 (file)
@@ -243,7 +243,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
                struct bio *split = bio_split(bio, end_sector - bio_sector,
                                              GFP_NOIO, &mddev->bio_set);
                bio_chain(split, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = split;
        }
 
@@ -260,7 +260,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
                        trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
                                              bio_sector);
                mddev_check_write_zeroes(mddev, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        }
        return true;
 
index 1c6dbf92c1368689fbf022bdcd0ddf8d63626f44..e7da4e175582e089ed4b6f04939e483f7b96cf89 100644 (file)
@@ -128,7 +128,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        mddev_check_write_zeroes(mddev, &mp_bh->bio);
-       submit_bio_noacct(&mp_bh->bio);
+       submit_bio(&mp_bh->bio);
        return true;
 }
 
@@ -322,7 +322,7 @@ static void multipathd(struct md_thread *thread)
                        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                }
        }
        spin_unlock_irqrestore(&conf->device_lock, flags);
index 309b3af906ad39c7e19608b54203c3465494d6cc..a129840562e63dd539dd42481467aabd93d28002 100644 (file)
@@ -8595,7 +8595,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
                trace_block_bio_remap(discard_bio,
                                disk_devt(mddev->gendisk),
                                bio->bi_iter.bi_sector);
-       submit_bio_noacct(discard_bio);
+       submit_bio(discard_bio);
 }
 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
 
index b21e101183f444054ef24a32e6d3ec783392c776..6780e9304058aab51669d62dc1613d0d99ece4c5 100644 (file)
@@ -464,7 +464,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
                        zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
                        &mddev->bio_set);
                bio_chain(split, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = split;
                end = zone->zone_end;
        } else
@@ -559,7 +559,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
                struct bio *split = bio_split(bio, sectors, GFP_NOIO,
                                              &mddev->bio_set);
                bio_chain(split, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = split;
        }
 
@@ -594,7 +594,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
                trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
                                      bio_sector);
        mddev_check_write_zeroes(mddev, bio);
-       submit_bio_noacct(bio);
+       submit_bio(bio);
        return true;
 }
 
index 99d5464a51f810dd7f479df6d8ab61c123926a87..6a64e6098be3bd3c5111679b6b37b8238806fb59 100644 (file)
@@ -806,7 +806,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
                        /* Just ignore it */
                        bio_endio(bio);
                else
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                bio = next;
                cond_resched();
        }
@@ -1299,7 +1299,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
                bio_chain(split, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = split;
                r1_bio->master_bio = bio;
                r1_bio->sectors = max_sectors;
@@ -1328,7 +1328,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
                trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
                                      r1_bio->sector);
 
-       submit_bio_noacct(read_bio);
+       submit_bio(read_bio);
 }
 
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1499,7 +1499,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
                bio_chain(split, bio);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = split;
                r1_bio->master_bio = bio;
                r1_bio->sectors = max_sectors;
@@ -2254,7 +2254,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                atomic_inc(&r1_bio->remaining);
                md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
-               submit_bio_noacct(wbio);
+               submit_bio(wbio);
        }
 
        put_sync_write_buf(r1_bio, 1);
@@ -2939,7 +2939,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                md_sync_acct_bio(bio, nr_sectors);
                                if (read_targets == 1)
                                        bio->bi_opf &= ~MD_FAILFAST;
-                               submit_bio_noacct(bio);
+                               submit_bio(bio);
                        }
                }
        } else {
@@ -2948,7 +2948,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                md_sync_acct_bio(bio, nr_sectors);
                if (read_targets == 1)
                        bio->bi_opf &= ~MD_FAILFAST;
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        }
        return nr_sectors;
 }
index dfe7d62d3fbdd1b5b2e1a387ad68de279097886f..89811bc5776ab3f6b6bc9546880213e2a0215aaf 100644 (file)
@@ -892,7 +892,7 @@ static void flush_pending_writes(struct r10conf *conf)
                                /* Just ignore it */
                                bio_endio(bio);
                        else
-                               submit_bio_noacct(bio);
+                               submit_bio(bio);
                        bio = next;
                }
                blk_finish_plug(&plug);
@@ -1087,7 +1087,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                        /* Just ignore it */
                        bio_endio(bio);
                else
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                bio = next;
        }
        kfree(plug);
@@ -1189,7 +1189,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                                              gfp, &conf->bio_split);
                bio_chain(split, bio);
                allow_barrier(conf);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                wait_barrier(conf, false);
                bio = split;
                r10_bio->master_bio = bio;
@@ -1216,7 +1216,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        if (mddev->gendisk)
                trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
                                      r10_bio->sector);
-       submit_bio_noacct(read_bio);
+       submit_bio(read_bio);
        return;
 }
 
@@ -1493,7 +1493,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                                              GFP_NOIO, &conf->bio_split);
                bio_chain(split, bio);
                allow_barrier(conf);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                wait_barrier(conf, false);
                bio = split;
                r10_bio->master_bio = bio;
@@ -1664,7 +1664,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
                bio_chain(split, bio);
                allow_barrier(conf);
                /* Resend the fist split part */
-               submit_bio_noacct(split);
+               submit_bio(split);
                wait_barrier(conf, false);
        }
        div_u64_rem(bio_end, stripe_size, &remainder);
@@ -1674,7 +1674,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
                bio_chain(split, bio);
                allow_barrier(conf);
                /* Resend the second split part */
-               submit_bio_noacct(bio);
+               submit_bio(bio);
                bio = split;
                wait_barrier(conf, false);
        }
@@ -2429,7 +2429,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
                        tbio->bi_opf |= MD_FAILFAST;
                tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
-               submit_bio_noacct(tbio);
+               submit_bio(tbio);
        }
 
        /* Now write out to any replacement devices
@@ -2448,7 +2448,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
                             bio_sectors(tbio));
-               submit_bio_noacct(tbio);
+               submit_bio(tbio);
        }
 
 done:
@@ -2570,22 +2570,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
        d = r10_bio->devs[1].devnum;
        wbio = r10_bio->devs[1].bio;
        wbio2 = r10_bio->devs[1].repl_bio;
-       /* Need to test wbio2->bi_end_io before we call
-        * submit_bio_noacct as if the former is NULL,
-        * the latter is free to free wbio2.
+       /*
+        * Need to test wbio2->bi_end_io before we call submit_bio because, if
+        * the former is NULL, the latter is free to free wbio2.
         */
        if (wbio2 && !wbio2->bi_end_io)
                wbio2 = NULL;
        if (wbio->bi_end_io) {
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
-               submit_bio_noacct(wbio);
+               submit_bio(wbio);
        }
        if (wbio2) {
                atomic_inc(&conf->mirrors[d].replacement->nr_pending);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
                             bio_sectors(wbio2));
-               submit_bio_noacct(wbio2);
+               submit_bio(wbio2);
        }
 }
 
@@ -3219,7 +3219,7 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
  * a number of r10_bio structures, one for each out-of-sync device.
  * As we setup these structures, we collect all bio's together into a list
  * which we then process collectively to add pages, and then process again
- * to pass to submit_bio_noacct.
+ * to pass to submit_bio.
  *
  * The r10_bio structures are linked using a borrowed master_bio pointer.
  * This link is counted in ->remaining.  When the r10_bio that points to NULL
@@ -3826,7 +3826,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                if (bio->bi_end_io == end_sync_read) {
                        md_sync_acct_bio(bio, nr_sectors);
                        bio->bi_status = 0;
-                       submit_bio_noacct(bio);
+                       submit_bio(bio);
                }
        }
 
@@ -4969,7 +4969,7 @@ read_more:
        md_sync_acct_bio(read_bio, r10_bio->sectors);
        atomic_inc(&r10_bio->remaining);
        read_bio->bi_next = NULL;
-       submit_bio_noacct(read_bio);
+       submit_bio(read_bio);
        sectors_done += nr_sectors;
        if (sector_nr <= last)
                goto read_more;
@@ -5032,7 +5032,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                md_sync_acct_bio(b, r10_bio->sectors);
                atomic_inc(&r10_bio->remaining);
                b->bi_next = NULL;
-               submit_bio_noacct(b);
+               submit_bio(b);
        }
        end_reshape_request(r10_bio);
 }
index 351d341a1ffa4cf2e9a6641541a39ed48b1a4609..a5048c2af73bc0d5f0df5d2abc891c37922ae035 100644 (file)
@@ -950,7 +950,7 @@ static void dispatch_bio_list(struct bio_list *tmp)
        struct bio *bio;
 
        while ((bio = bio_list_pop(tmp)))
-               submit_bio_noacct(bio);
+               submit_bio(bio);
 }
 
 static int cmp_stripe(void *priv, const struct list_head *a,
@@ -1227,7 +1227,7 @@ again:
                        if (should_defer && op_is_write(op))
                                bio_list_add(&pending_bios, bi);
                        else
-                               submit_bio_noacct(bi);
+                               submit_bio(bi);
                }
                if (rrdev) {
                        if (s->syncing || s->expanding || s->expanded
@@ -1274,7 +1274,7 @@ again:
                        if (should_defer && op_is_write(op))
                                bio_list_add(&pending_bios, rbi);
                        else
-                               submit_bio_noacct(rbi);
+                               submit_bio(rbi);
                }
                if (!rdev && !rrdev) {
                        if (op_is_write(op))
@@ -5460,7 +5460,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
        if (mddev->gendisk)
                trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
                                      raid_bio->bi_iter.bi_sector);
-       submit_bio_noacct(align_bio);
+       submit_bio(align_bio);
        return 1;
 
 out_rcu_unlock:
@@ -5479,7 +5479,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
                struct r5conf *conf = mddev->private;
                split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
                bio_chain(split, raid_bio);
-               submit_bio_noacct(raid_bio);
+               submit_bio(raid_bio);
                raid_bio = split;
        }
 
index 1b31f19e10537d2f328f7fa7f7551a5fe6e094a7..e24e2e6766da4e07fd4ed6481890bfe215760223 100644 (file)
@@ -355,7 +355,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
                bio->bi_opf |= REQ_NVME_MPATH;
                trace_block_bio_remap(bio, disk_devt(ns->head->disk),
                                      bio->bi_iter.bi_sector);
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        } else if (nvme_available_path(head)) {
                dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
 
@@ -467,7 +467,7 @@ static void nvme_requeue_work(struct work_struct *work)
                next = bio->bi_next;
                bio->bi_next = NULL;
 
-               submit_bio_noacct(bio);
+               submit_bio(bio);
        }
 }
 
index 60d01613899711c3fedca8001fbc0571bd32f096..043e10f9d37358a44e5027f29faabb04ce985a2d 100644 (file)
@@ -859,7 +859,6 @@ void blk_request_module(dev_t devt);
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-void submit_bio_noacct(struct bio *bio);
 
 extern int blk_lld_busy(struct request_queue *q);
 extern void blk_queue_split(struct bio **);
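
As a usage reference for the now-unified interface, a minimal
caller-side sketch (the helpers my_end_io and read_first_page are
hypothetical and error handling is elided; per the kernel-doc added
above, the bio must not be touched until ->bi_end_io() has run):

    static void my_end_io(struct bio *bio)
    {
            /* Completion callback: status is delivered in bio->bi_status. */
            if (bio->bi_status)
                    pr_err("read failed: %d\n",
                           blk_status_to_errno(bio->bi_status));
            bio_put(bio);
    }

    static void read_first_page(struct block_device *bdev, struct page *page)
    {
            /* bio_alloc() takes the bdev and opf up front, matching the
             * bio_alloc_bioset() signature shown in the block/bio.c hunk. */
            struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

            bio->bi_iter.bi_sector = 0;
            __bio_add_page(bio, page, PAGE_SIZE, 0);
            bio->bi_end_io = my_end_io;
            submit_bio(bio);
            /* Do not touch @bio here; my_end_io() owns it from now on. */
    }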