www.infradead.org Git - nvme.git/commitdiff
nvme-tcp: open-code nvme_tcp_queue_request() for R2T
author Hannes Reinecke <hare@kernel.org>
Thu, 3 Apr 2025 06:55:20 +0000 (08:55 +0200)
committer Christoph Hellwig <hch@lst.de>
Tue, 22 Apr 2025 08:08:32 +0000 (10:08 +0200)
When handling an R2T PDU we short-circuit nvme_tcp_queue_request(),
as we should not attempt to send consecutive PDUs. So open-code the
queueing for R2T and drop the now-unused 'sync' argument from
nvme_tcp_queue_request().
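
In effect the R2T handler now queues the H2C data PDU itself and always
defers transmission to the io_work context. A minimal sketch of the
resulting path, assembled from the hunk below (the comment is editorial
and not part of the patch):

	nvme_tcp_setup_h2c_data_pdu(req);

	/*
	 * Do not go through nvme_tcp_queue_request(): its fast path may
	 * take send_mutex and transmit inline, which would emit a PDU
	 * back-to-back with the one still being processed.  Queue the
	 * request and let io_work send it instead.
	 */
	llist_add(&req->lentry, &queue->req_list);
	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);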

Signed-off-by: Hannes Reinecke <hare@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
drivers/nvme/host/tcp.c

index e20a994d63c24b80a2ca0eb796d60f2adeb32004..a9d455c39652c523058f76944d54cdc33fdabd9c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -403,7 +403,7 @@ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
 }
 
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
-               bool sync, bool last)
+               bool last)
 {
        struct nvme_tcp_queue *queue = req->queue;
        bool empty;
@@ -417,7 +417,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
         * are on the same cpu, so we don't introduce contention.
         */
        if (queue->io_cpu == raw_smp_processor_id() &&
-           sync && empty && mutex_trylock(&queue->send_mutex)) {
+           empty && mutex_trylock(&queue->send_mutex)) {
                nvme_tcp_send_all(queue);
                mutex_unlock(&queue->send_mutex);
        }
@@ -770,7 +770,9 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
        req->ttag = pdu->ttag;
 
        nvme_tcp_setup_h2c_data_pdu(req);
-       nvme_tcp_queue_request(req, false, true);
+
+       llist_add(&req->lentry, &queue->req_list);
+       queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 
        return 0;
 }
@@ -2610,7 +2612,7 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
        ctrl->async_req.curr_bio = NULL;
        ctrl->async_req.data_len = 0;
 
-       nvme_tcp_queue_request(&ctrl->async_req, true, true);
+       nvme_tcp_queue_request(&ctrl->async_req, true);
 }
 
 static void nvme_tcp_complete_timed_out(struct request *rq)
@@ -2762,7 +2764,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        nvme_start_request(rq);
 
-       nvme_tcp_queue_request(req, true, bd->last);
+       nvme_tcp_queue_request(req, bd->last);
 
        return BLK_STS_OK;
 }