From ff8a76f62b0ba0ad092e87a5b48457e93cd378de Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 29 Dec 2016 13:33:03 -0800
Subject: [PATCH] nvme: switch abort to blk_execute_rq_nowait

And remove the now unused nvme_submit_cmd helper.

Signed-off-by: Christoph Hellwig
Acked-by: Keith Busch
Signed-off-by: Jens Axboe
(cherry picked from commit e7a2a87d5938bbebe1637c82fbde94ea6be3ef78)

Orabug: 25130845

Signed-off-by: Ashok Vairavan
Reviewed-by: Martin K. Petersen
---
 drivers/nvme/host/pci.c | 56 ++++++++++++++++++++++++--------------------------------
 include/linux/blk-mq.h  |  6 ++++++
 2 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 11ac0aab13d2..fe1306c0b556 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -329,16 +329,6 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 }
 
-static void async_completion(struct nvme_queue *nvmeq, void *ctx,
-		struct nvme_completion *cqe)
-{
-	struct async_cmd_info *cmdinfo = ctx;
-	cmdinfo->result = le32_to_cpup(&cqe->result);
-	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-	blk_mq_free_request(cmdinfo->req);
-}
-
 /**
  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
@@ -362,14 +352,6 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
 	nvmeq->sq_tail = tail;
 }
 
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&nvmeq->q_lock, flags);
-	__nvme_submit_cmd(nvmeq, cmd);
-	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-}
-
 static __le64 **iod_list(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -956,13 +938,24 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
+static void abort_endio(struct request *req, int error)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_queue *nvmeq = iod->nvmeq;
+	u16 status = req->errors;
+
+	dev_warn(nvmeq->dev->ctrl.device, "Abort status:%x", status);
+	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
+
+	blk_mq_free_request(req);
+}
+
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = iod->nvmeq;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
-	struct nvme_cmd_info *abort_cmd;
 	struct nvme_command cmd;
 
 	/*
@@ -1000,32 +993,31 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		return BLK_EH_HANDLED;
 	}
 
-	if (atomic_dec_and_test(&dev->ctrl.abort_limit))
-		return BLK_EH_RESET_TIMER;
-
 	iod->aborted = 1;
 
-	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, GFP_ATOMIC,
-									false);
-	if (IS_ERR(abort_req)) {
+	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
 	}
 
-	abort_cmd = blk_mq_rq_to_pdu(abort_req);
-	nvme_set_info(abort_cmd, abort_req, abort_completion);
-
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.abort.opcode = nvme_admin_abort_cmd;
 	cmd.abort.cid = req->tag;
 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
-	cmd.abort.command_id = abort_req->tag;
-
-	cmd_rq->aborted = 1;
 
 	dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
 							req->tag, nvmeq->qid);
-	nvme_submit_cmd(dev->queues[0], &cmd);
+
+	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+			BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(abort_req)) {
+		atomic_inc(&dev->ctrl.abort_limit);
+		return BLK_EH_RESET_TIMER;
+	}
+
+	abort_req->timeout = ADMIN_TIMEOUT;
+	abort_req->end_io_data = NULL;
+	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 94b640c92792..4563ce91788d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -179,6 +179,12 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_free_request(struct request *rq);
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+
+enum {
+	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
+	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
+};
+
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-- 
2.50.1
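
A note on the pattern this patch converts to: build the NVMe command up
front, then hand it to the block layer for asynchronous execution. A
minimal sketch of that flow is below; it is an illustration, not part of
the patch. submit_abort_sketch() and my_abort_endio() are hypothetical
names, while nvme_alloc_request(), blk_execute_rq_nowait(), ADMIN_TIMEOUT
and BLK_MQ_REQ_NOWAIT are used exactly as in the diff above.

	/* completion callback: runs from completion context later */
	static void my_abort_endio(struct request *req, int error)
	{
		blk_mq_free_request(req);
	}

	static int submit_abort_sketch(struct nvme_dev *dev, u16 cid, u16 sqid)
	{
		struct nvme_command cmd;
		struct request *rq;

		memset(&cmd, 0, sizeof(cmd));
		cmd.abort.opcode = nvme_admin_abort_cmd;
		cmd.abort.cid = cid;
		cmd.abort.sqid = cpu_to_le16(sqid);

		/* BLK_MQ_REQ_NOWAIT: fail fast rather than sleep for a tag */
		rq = nvme_alloc_request(dev->ctrl.admin_q, &cmd, BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		rq->timeout = ADMIN_TIMEOUT;
		rq->end_io_data = NULL;	/* nothing for the callback to read */

		/* queue the request and return; my_abort_endio() runs later */
		blk_execute_rq_nowait(rq->q, NULL, rq, 0, my_abort_endio);
		return 0;
	}

The gain over the removed nvme_submit_cmd() path is that the abort is now
an ordinary tagged request: blk-mq gives it a tag, a timeout and a
completion callback, so the driver no longer needs the hand-rolled
nvme_set_info()/abort_completion() bookkeeping deleted above.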