#ifdef CONFIG_BLK_MQ_STACKING
 /**
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
- * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request *rq)
 {
+       struct request_queue *q = rq->q;
        unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
        blk_status_t ret;
 
                return BLK_STS_IOERR;
        }
 
-       if (rq->q->disk &&
-           should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq)))
+       if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
                return BLK_STS_IOERR;
 
        if (blk_crypto_insert_cloned_request(rq))
         * bypass a potential scheduler on the bottom device for
         * insert.
         */
-       blk_mq_run_dispatch_ops(rq->q,
+       blk_mq_run_dispatch_ops(q,
                        ret = blk_mq_request_issue_directly(rq, true));
        if (ret)
                blk_account_io_done(rq, ktime_get_ns());
 
                clone->rq_flags |= RQF_IO_STAT;
 
        clone->start_time_ns = ktime_get_ns();
-       r = blk_insert_cloned_request(clone->q, clone);
+       r = blk_insert_cloned_request(clone);
        if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
 
                struct bio_set *bs, gfp_t gfp_mask,
                int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
 void blk_rq_unprep_clone(struct request *rq);
-blk_status_t blk_insert_cloned_request(struct request_queue *q,
-               struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
 
 struct rq_map_data {
        struct page **pages;
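
Taken together, the hunks above change the stacking-driver contract from blk_insert_cloned_request(q, rq) to blk_insert_cloned_request(rq), with the bottom queue now derived from rq->q inside the helper (see the new local `q` at the top of the function). A minimal sketch of a caller updated for the new single-argument signature follows; the function name my_stacker_issue_clone and its completion path are illustrative only (dm-rq.c uses its own dm_complete_request()), and the clone is assumed to have already been set up with blk_rq_prep_clone() as in the dm-rq.c hunk.

	/* Requires CONFIG_BLK_MQ_STACKING for blk_insert_cloned_request(). */
	#include <linux/blk-mq.h>
	#include <linux/ktime.h>

	/*
	 * Hypothetical stacking-driver submit path.  The bottom-device queue
	 * is no longer passed explicitly; the helper reads it from clone->q.
	 */
	static blk_status_t my_stacker_issue_clone(struct request *orig,
						   struct request *clone)
	{
		blk_status_t ret;

		/* Stamp the clone's start time, mirroring the dm-rq.c caller. */
		clone->start_time_ns = ktime_get_ns();

		/* New API: only the clone is passed; its queue comes from clone->q. */
		ret = blk_insert_cloned_request(clone);
		if (ret != BLK_STS_OK &&
		    ret != BLK_STS_RESOURCE &&
		    ret != BLK_STS_DEV_RESOURCE) {
			/*
			 * Hard error: complete in terms of the original request.
			 * A real stacking driver would use its own completion
			 * helper here; blk_mq_end_request() is just for the sketch.
			 */
			blk_mq_end_request(orig, ret);
		}
		return ret;
	}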