return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
-                          bool wait)
+bool blk_mq_get_driver_tag(struct request *rq)
 {
        struct blk_mq_alloc_data data = {
                .q = rq->q,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
-               .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
+               .flags = BLK_MQ_REQ_NOWAIT,
        };
 
-       might_sleep_if(wait);
-
        if (rq->tag != -1)
                goto done;
 
        }
 
 done:
-       if (hctx)
-               *hctx = data.hctx;
        return rq->tag != -1;
 }
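
A minimal userspace model of the simplified contract in the hunk above, for readers skimming the change: after this patch the helper always derives the hctx from the request itself (blk_mq_map_queue() on rq->q and rq->mq_ctx->cpu), only ever tries a non-blocking allocation, and reports success by whether rq->tag was filled in. The names below (my_request, try_alloc_tag, get_driver_tag) are hypothetical stand-ins, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

struct my_request {
        int tag;                        /* -1 means "no driver tag held" */
};

/* Stand-in for a non-blocking allocation from a shared tag pool. */
static int try_alloc_tag(void)
{
        static int next;
        return next < 4 ? next++ : -1;  /* pretend the pool holds 4 tags */
}

static bool get_driver_tag(struct my_request *rq)
{
        if (rq->tag != -1)              /* already holds a tag */
                return true;
        rq->tag = try_alloc_tag();      /* never sleeps, may fail */
        return rq->tag != -1;
}

int main(void)
{
        struct my_request rq = { .tag = -1 };

        if (get_driver_tag(&rq))
                printf("got tag %d\n", rq.tag);
        else
                printf("no tag free, requeue and retry later\n");
        return 0;
}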
 
                 * Don't clear RESTART here, someone else could have set it.
                 * At most this will cost an extra queue run.
                 */
-               return blk_mq_get_driver_tag(rq, hctx, false);
+               return blk_mq_get_driver_tag(rq);
        }
 
        wait = &this_hctx->dispatch_wait;
         * allocation failure and adding the hardware queue to the wait
         * queue.
         */
-       ret = blk_mq_get_driver_tag(rq, hctx, false);
+       ret = blk_mq_get_driver_tag(rq);
        if (!ret) {
                spin_unlock(&this_hctx->lock);
                return false;
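
The comment in this hunk describes a register-then-recheck pattern: after a failed non-blocking allocation, register for a wakeup under the lock and retry once, so a tag freed between the failure and the registration is not missed. Below is a compilable userspace sketch of that pattern only, with made-up names (try_get_resource, add_to_wait_list, get_or_wait) rather than the blk-mq APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int free_resources;              /* pretend the pool starts empty */
static bool on_wait_list;

static bool try_get_resource(void)      /* non-blocking attempt */
{
        if (free_resources > 0) {
                free_resources--;
                return true;
        }
        return false;
}

static void add_to_wait_list(void)      /* stand-in for arming a wakeup */
{
        on_wait_list = true;
}

static bool get_or_wait(void)
{
        if (try_get_resource())
                return true;

        pthread_mutex_lock(&lock);
        if (!on_wait_list)
                add_to_wait_list();

        /*
         * A resource may have been freed between the failed attempt and
         * the registration above, and that free's wakeup has already
         * been missed, so retry once before giving up.
         */
        if (try_get_resource()) {
                on_wait_list = false;   /* wakeup no longer needed */
                pthread_mutex_unlock(&lock);
                return true;
        }
        pthread_mutex_unlock(&lock);
        return false;                   /* stay registered; the wakeup reruns us */
}

int main(void)
{
        printf("%s\n", get_or_wait() ? "got it" : "waiting for wakeup");
        return 0;
}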
                if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
                        break;
 
-               if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+               if (!blk_mq_get_driver_tag(rq)) {
                        /*
                         * The initial allocation attempt failed, so we need to
                         * rerun the hardware queue when a tag is freed. The
                        bd.last = true;
                else {
                        nxt = list_first_entry(list, struct request, queuelist);
-                       bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+                       bd.last = !blk_mq_get_driver_tag(nxt);
                }
 
                ret = q->mq_ops->queue_rq(hctx, &bd);
        if (!blk_mq_get_dispatch_budget(hctx))
                goto insert;
 
-       if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+       if (!blk_mq_get_driver_tag(rq)) {
                blk_mq_put_dispatch_budget(hctx);
                goto insert;
        }
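
In the direct-issue hunk above the ordering matters: the dispatch budget is taken before the driver tag, and the budget is put back when the tag cannot be obtained, so a failed attempt leaks neither resource. A small standalone model of that acquire/rollback ordering, using hypothetical helper names rather than the kernel functions:

#include <stdbool.h>
#include <stdio.h>

static int budget = 1;                  /* one unit of dispatch budget free */
static int tags;                        /* pretend no driver tag is free */

static bool get_budget(void)
{
        if (budget > 0) {
                budget--;
                return true;
        }
        return false;
}

static void put_budget(void)
{
        budget++;
}

static bool get_tag(void)
{
        if (tags > 0) {
                tags--;
                return true;
        }
        return false;
}

/* Try to issue directly; fall back to inserting when either step fails. */
static const char *try_issue_directly(void)
{
        if (!get_budget())
                return "insert";

        if (!get_tag()) {
                put_budget();           /* roll back so the budget is not leaked */
                return "insert";
        }
        return "issued";
}

int main(void)
{
        printf("%s\n", try_issue_directly());   /* prints "insert" with no tags free */
        return 0;
}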
 
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
-                               bool wait);
+bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);