nvme: merge nvme_abort_req and nvme_timeout
author	Christoph Hellwig <hch@lst.de>
	Thu, 22 Oct 2015 12:03:35 +0000 (14:03 +0200)
committer	Chuck Anderson <chuck.anderson@oracle.com>
	Thu, 1 Jun 2017 20:40:44 +0000 (13:40 -0700)
We want to be able to return better error values from nvme_timeout, which
is significantly easier if the two functions are merged.  Also clean up and
reduce the printk spew so that we only get one message per abort.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 31c7c7d2c9f17dc98a98c59c17e184bf164ee760)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
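
For context, the "better error values" the message refers to are the enum
blk_eh_timer_return codes that a blk-mq timeout handler of this kernel
generation can hand back directly once the abort logic lives inside
nvme_timeout().  A minimal sketch follows, assuming a hypothetical driver
(my_timeout() and my_should_reset() are illustrative names, not part of this
commit or of the nvme driver):

	#include <linux/blk-mq.h>

	/* Hypothetical policy helper: decide whether to stop waiting. */
	static bool my_should_reset(struct request *req)
	{
		/* Placeholder: always ask for more time in this sketch. */
		return false;
	}

	/*
	 * Sketch of a blk-mq timeout handler of this era.  Being able to
	 * return the enum value from the same function that issues the
	 * abort is what merging nvme_abort_req() into nvme_timeout()
	 * makes straightforward.
	 */
	static enum blk_eh_timer_return my_timeout(struct request *req,
						   bool reserved)
	{
		if (my_should_reset(req)) {
			/* Driver will complete or fail the request itself. */
			return BLK_EH_HANDLED;
		}

		/* Re-arm the timer and give the command more time. */
		return BLK_EH_RESET_TIMER;
	}

In the merged nvme_timeout() below, every path still returns
BLK_EH_RESET_TIMER; the point of the merge is that follow-on changes can
return other codes where appropriate without funneling through a void
helper.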
drivers/nvme/host/pci.c

index c09552884cdd1fe1ed604cd252fbd923cd572cfa..c8dec5f6356b52b0ff9e73cc6c47627188bbaf7f 100644
@@ -1100,13 +1100,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-/**
- * nvme_abort_req - Attempt aborting a request
- *
- * Schedule controller reset if the command was already aborted once before and
- * still hasn't been returned to the driver, or if this is the admin queue.
- */
-static void nvme_abort_req(struct request *req)
+static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
        struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -1115,6 +1109,11 @@ static void nvme_abort_req(struct request *req)
        struct nvme_cmd_info *abort_cmd;
        struct nvme_command cmd;
 
+       /*
+        * Schedule controller reset if the command was already aborted once
+        * before and still hasn't been returned to the driver, or if this is
+        * the admin queue.
+        */
        if (!nvmeq->qid || cmd_rq->aborted) {
                spin_lock_irq(&dev_list_lock);
                if (!__nvme_reset(dev)) {
@@ -1123,16 +1122,16 @@ static void nvme_abort_req(struct request *req)
                                 req->tag, nvmeq->qid);
                }
                spin_unlock_irq(&dev_list_lock);
-               return;
+               return BLK_EH_RESET_TIMER;
        }
 
        if (!dev->ctrl.abort_limit)
-               return;
+               return BLK_EH_RESET_TIMER;
 
        abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, GFP_ATOMIC,
                                                                        false);
        if (IS_ERR(abort_req))
-               return;
+               return BLK_EH_RESET_TIMER;
 
        abort_cmd = blk_mq_rq_to_pdu(abort_req);
        nvme_set_info(abort_cmd, abort_req, abort_completion);
@@ -1146,9 +1145,16 @@ static void nvme_abort_req(struct request *req)
        --dev->ctrl.abort_limit;
        cmd_rq->aborted = 1;
 
-       dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
+       dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
+                                req->tag, nvmeq->qid);
        nvme_submit_cmd(dev->queues[0], &cmd);
+
+       /*
+        * The aborted req will be completed on receiving the abort req.
+        * We enable the timer again. If hit twice, it'll cause a device reset,
+        * as the device then is in a faulty state.
+        */
+       return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1179,23 +1185,6 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
        fn(nvmeq, ctx, &cqe);
 }
 
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
-{
-       struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
-       struct nvme_queue *nvmeq = cmd->nvmeq;
-
-       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       nvme_abort_req(req);
-
-       /*
-        * The aborted req will be completed on receiving the abort req.
-        * We enable the timer again. If hit twice, it'll cause a device reset,
-        * as the device then is in a faulty state.
-        */
-       return BLK_EH_RESET_TIMER;
-}
-
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),