Currently we update the driver tags request table in
blk_mq_get_driver_tag(), so drivers that support queue_rqs() have to
update that inflight table by themselves.

Move the update to blk_mq_start_request(), which is a better place:
it is where we set up the deadline for the request timeout check, and
it is exactly where the request becomes inflight.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230913151616.3164338-5-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
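
For context, a minimal sketch of where the table update lands after
this patch (simplified; the real blk_mq_start_request() in
block/blk-mq.c also handles I/O stats and blk-integrity):

	/* Simplified sketch, not the full kernel function. */
	void blk_mq_start_request(struct request *rq)
	{
		trace_block_rq_issue(rq);

		blk_add_timer(rq);                      /* set up the timeout deadline */
		WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); /* request becomes inflight */

		/*
		 * New in this patch: publish the request in the driver
		 * tags table here, so it is visible to tag iterators no
		 * matter which dispatch path allocated the tag.
		 */
		rq->mq_hctx->tags->rqs[rq->tag] = rq;
	}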
 
        blk_add_timer(rq);
        WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
+       rq->mq_hctx->tags->rqs[rq->tag] = rq;
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
        if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
 
 
 static inline bool blk_mq_get_driver_tag(struct request *rq)
 {
-       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
        if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
                return false;
 
-       hctx->tags->rqs[rq->tag] = rq;
        return true;
 }
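
Since virtblk_prep_rq() and nvme_prep_rq() below already reach
blk_mq_start_request() while preparing the request, the manual
tags->rqs[] updates in their queue_rqs() paths become redundant and
can be dropped. For illustration, a hypothetical driver ->queue_rq()
hook (demo_queue_rq() is not a real kernel function) shows the
pattern every dispatch path now shares:

	static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
					  const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;

		/*
		 * blk_mq_start_request() arms the timeout, marks the
		 * request inflight and, after this patch, also updates
		 * hctx->tags->rqs[rq->tag], so no driver-side
		 * bookkeeping is needed.
		 */
		blk_mq_start_request(rq);

		/* ... map data and ring the hardware doorbell ... */
		return BLK_STS_OK;
	}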
 
 
        struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 
-       req->mq_hctx->tags->rqs[req->tag] = req;
-
        return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
 }
 
 
        if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
                return false;
 
-       req->mq_hctx->tags->rqs[req->tag] = req;
        return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
 }