        if (blk_crypto_insert_cloned_request(rq))
                return BLK_STS_IOERR;
 
-       if (blk_queue_io_stat(q))
-               blk_account_io_start(rq);
+       blk_account_io_start(rq);
 
        /*
         * Since we have a scheduler attached on the top device,
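
The caller-side blk_queue_io_stat(q) check can be dropped because the new
blk_account_io_start() wrapper (added to blk.h further down) re-checks via
blk_do_io_stat(). A minimal sketch of why the two tests are equivalent,
assuming RQF_IO_STAT is mirrored from the queue's io_stat setting at request
init time (as blk_mq_rq_ctx_init() does in blk-mq); all names below are
simplified stand-ins:

#include <stdbool.h>

#define RQF_IO_STAT	(1u << 0)

struct queue   { bool io_stat; };
struct request { struct queue *q; unsigned int rq_flags; };

/* Request setup: mirror the queue-level setting into a per-request flag. */
static void rq_init(struct request *rq, struct queue *q)
{
	rq->q = q;
	rq->rq_flags = q->io_stat ? RQF_IO_STAT : 0;
}

/* Once the flag is set, testing it subsumes the queue check, so call
 * sites no longer need to consult rq->q at all. */
static bool do_io_stat(const struct request *rq)
{
	return rq->rq_flags & RQF_IO_STAT;
}

The two closing braces below are the next hunk's leading context: the tail of
the function that precedes blk_account_io_done() in blk-core.c.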
        }
 }
 
-void blk_account_io_done(struct request *req, u64 now)
+void __blk_account_io_done(struct request *req, u64 now)
 {
-       /*
-        * Account IO completion.  flush_rq isn't accounted as a
-        * normal IO on queueing nor completion.  Accounting the
-        * containing request is enough.
-        */
-       if (req->part && blk_do_io_stat(req) &&
-           !(req->rq_flags & RQF_FLUSH_SEQ)) {
-               const int sgrp = op_stat_group(req_op(req));
+       const int sgrp = op_stat_group(req_op(req));
 
-               part_stat_lock();
-               update_io_ticks(req->part, jiffies, true);
-               part_stat_inc(req->part, ios[sgrp]);
-               part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
-               part_stat_unlock();
-       }
+       part_stat_lock();
+       update_io_ticks(req->part, jiffies, true);
+       part_stat_inc(req->part, ios[sgrp]);
+       part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+       part_stat_unlock();
 }
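
For reference, the counters updated above live in per-cpu struct disk_stats
buckets keyed by operation class; op_stat_group() maps the request's op to one
of those classes. A self-contained miniature of what ios[] and nsecs[]
accumulate (single-threaded here, so the per-cpu protection that
part_stat_lock()/part_stat_unlock() provide is elided):

#include <stdio.h>

/* Same classes as the kernel's enum stat_group. */
enum stat_group { STAT_READ, STAT_WRITE, STAT_DISCARD, STAT_FLUSH,
		  NR_STAT_GROUPS };

struct disk_stats {
	unsigned long      ios[NR_STAT_GROUPS];   /* completed requests */
	unsigned long long nsecs[NR_STAT_GROUPS]; /* total time in flight */
};

static void account_done(struct disk_stats *s, enum stat_group sgrp,
			 unsigned long long start_ns, unsigned long long now_ns)
{
	s->ios[sgrp]++;
	s->nsecs[sgrp] += now_ns - start_ns;
}

int main(void)
{
	struct disk_stats s = { 0 };

	account_done(&s, STAT_READ, 1000, 4000);	/* 3 us read */
	account_done(&s, STAT_READ, 2000, 7000);	/* 5 us read */
	printf("reads: %lu, avg latency %llu ns\n",
	       s.ios[STAT_READ], s.nsecs[STAT_READ] / s.ios[STAT_READ]);
	return 0;
}

In the kernel these buckets really are per-cpu, which is what the
part_stat_lock()/part_stat_unlock() pair above protects.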
 
-void blk_account_io_start(struct request *rq)
+void __blk_account_io_start(struct request *rq)
 {
-       if (!blk_do_io_stat(rq))
-               return;
-
        /* passthrough requests can hold bios that do not have ->bi_bdev set */
        if (rq->bio && rq->bio->bi_bdev)
                rq->part = rq->bio->bi_bdev;
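
The hunk's context ends here, but note what the out-of-line helper keeps:
resolving which block device to account against. Passthrough requests can
carry bios with no ->bi_bdev set, so the function falls back to the whole-disk
device (the else branch lies just past the context shown; the part0 fallback
below is assumed from the surrounding kernel code). A sketch with stand-in
types:

#include <stddef.h>

/* Stand-in types carrying only the fields the guard touches. */
struct block_device { int unused; };
struct gendisk      { struct block_device *part0; };
struct bio          { struct block_device *bi_bdev; };
struct request {
	struct bio          *bio;
	struct gendisk      *rq_disk;
	struct block_device *part;
};

static void set_accounting_part(struct request *rq)
{
	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;	/* charge the bio's partition */
	else
		rq->part = rq->rq_disk->part0;	/* assumed: charge the whole disk */
}

The remaining hunks are against block/blk.h, where these declarations and the
new inline fast-path wrappers live.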
 
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
                        struct bio *bio, unsigned int nr_segs);
 
-void blk_account_io_start(struct request *req);
-void blk_account_io_done(struct request *req, u64 now);
+void __blk_account_io_start(struct request *req);
+void __blk_account_io_done(struct request *req, u64 now);
 
 /*
  * Plug flush limits
  */
 static inline bool blk_do_io_stat(struct request *rq)
 {
-       return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
+       return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
+}
+
+static inline void blk_account_io_done(struct request *req, u64 now)
+{
+       /*
+        * Account IO completion.  flush_rq isn't accounted as a
+        * normal IO on queueing nor completion.  Accounting the
+        * containing request is enough.
+        */
+       if (blk_do_io_stat(req) && req->part &&
+           !(req->rq_flags & RQF_FLUSH_SEQ))
+               __blk_account_io_done(req, now);
+}
+
+static inline void blk_account_io_start(struct request *req)
+{
+       if (blk_do_io_stat(req))
+               __blk_account_io_start(req);
 }
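
Taken together, the blk.h side is a classic fast-path split: the static inline
wrappers perform only the cheap checks, and the out-of-line
__blk_account_io_*() helpers are entered only when stats are actually
collected. Note that blk_do_io_stat() was also reordered to test RQF_IO_STAT
before loading rq_disk, so the disabled case short-circuits on a single flag
test. A compilable miniature of the pattern, with illustrative names:

#include <stdio.h>

#define RQF_IO_STAT (1u << 0)

struct request { unsigned int rq_flags; unsigned long long start_ns; };

/* Out-of-line slow path: only reached when accounting is enabled. */
void __account_done(struct request *rq, unsigned long long now)
{
	printf("latency: %llu ns\n", now - rq->start_ns);
}

/* Inline fast path: in the common "stats off" case this compiles down
 * to a single test-and-branch at the call site, with no function call. */
static inline void account_done(struct request *rq, unsigned long long now)
{
	if (rq->rq_flags & RQF_IO_STAT)
		__account_done(rq, now);
}

int main(void)
{
	struct request off = { 0, 100 }, on = { RQF_IO_STAT, 100 };

	account_done(&off, 500);	/* flag clear: falls straight through */
	account_done(&on, 500);		/* flag set: takes the slow path */
	return 0;
}

blk_account_io_start() follows the same shape; the done side additionally
folds in the req->part and RQF_FLUSH_SEQ conditions so flush sequence
requests stay unaccounted, as the moved comment explains.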
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)