nvme: switch abort to blk_execute_rq_nowait
author Christoph Hellwig <hch@lst.de>
Thu, 29 Dec 2016 21:33:03 +0000 (13:33 -0800)
committer Chuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:40:57 +0000 (13:40 -0700)
And remove the now unused nvme_submit_cmd helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit e7a2a87d5938bbebe1637c82fbde94ea6be3ef78)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
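For context before the diff: the patch drops the driver's hand-rolled doorbell submission (nvme_submit_cmd) for the abort path. The timeout handler now builds the abort command on the stack, allocates an admin-queue request for it with BLK_MQ_REQ_NOWAIT, and hands it to blk_execute_rq_nowait() with abort_endio() as the completion callback. A minimal sketch of that pattern, using the names visible in the diff below (the wrapper function itself is hypothetical and error handling is trimmed):

        /*
         * Illustrative sketch only -- not part of the patch.  Shows how a
         * pre-built NVMe command is now driven through the block layer
         * instead of __nvme_submit_cmd()/nvme_submit_cmd().
         */
        static int nvme_submit_abort_async(struct nvme_dev *dev, u16 cid, u16 sqid)
        {
                struct nvme_command cmd;
                struct request *abort_req;

                memset(&cmd, 0, sizeof(cmd));
                cmd.abort.opcode = nvme_admin_abort_cmd;
                cmd.abort.cid = cid;
                cmd.abort.sqid = cpu_to_le16(sqid);

                /* BLK_MQ_REQ_NOWAIT: fail rather than sleep if no tag is free */
                abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
                                               BLK_MQ_REQ_NOWAIT);
                if (IS_ERR(abort_req))
                        return PTR_ERR(abort_req);

                abort_req->timeout = ADMIN_TIMEOUT;
                abort_req->end_io_data = NULL;

                /* abort_endio() runs on completion and frees the request */
                blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
                return 0;
        }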
drivers/nvme/host/pci.c
include/linux/blk-mq.h

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 11ac0aab13d2bcd86880862dc976a14789a117e7..fe1306c0b556beeee204029d696daabdde7676f8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -329,16 +329,6 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 }
 
-static void async_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
-{
-       struct async_cmd_info *cmdinfo = ctx;
-       cmdinfo->result = le32_to_cpup(&cqe->result);
-       cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-       queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-       blk_mq_free_request(cmdinfo->req);
-}
-
 /**
  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
@@ -362,14 +352,6 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
        nvmeq->sq_tail = tail;
 }
 
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&nvmeq->q_lock, flags);
-       __nvme_submit_cmd(nvmeq, cmd);
-       spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-}
-
 static __le64 **iod_list(struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -956,13 +938,24 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
+static void abort_endio(struct request *req, int error)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_queue *nvmeq = iod->nvmeq;
+       u16 status = req->errors;
+
+       dev_warn(nvmeq->dev->ctrl.device, "Abort status:%x", status);
+       atomic_inc(&nvmeq->dev->ctrl.abort_limit);
+
+       blk_mq_free_request(req);
+}
+
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;
        struct nvme_dev *dev = nvmeq->dev;
        struct request *abort_req;
-       struct nvme_cmd_info *abort_cmd;
        struct nvme_command cmd;
 
        /*
@@ -1000,32 +993,31 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                return BLK_EH_HANDLED;
        }
 
-       if (atomic_dec_and_test(&dev->ctrl.abort_limit))
-               return BLK_EH_RESET_TIMER;
-
        iod->aborted = 1;
 
-       abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, GFP_ATOMIC,
-                                                                       false);
-       if (IS_ERR(abort_req)) {
+       if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
                atomic_inc(&dev->ctrl.abort_limit);
                return BLK_EH_RESET_TIMER;
        }
 
-       abort_cmd = blk_mq_rq_to_pdu(abort_req);
-       nvme_set_info(abort_cmd, abort_req, abort_completion);
-
        memset(&cmd, 0, sizeof(cmd));
        cmd.abort.opcode = nvme_admin_abort_cmd;
        cmd.abort.cid = req->tag;
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
-       cmd.abort.command_id = abort_req->tag;
-
-       cmd_rq->aborted = 1;
 
        dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
                                 req->tag, nvmeq->qid);
-       nvme_submit_cmd(dev->queues[0], &cmd);
+
+       abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+                              BLK_MQ_REQ_NOWAIT);
+       if (IS_ERR(abort_req)) {
+               atomic_inc(&dev->ctrl.abort_limit);
+               return BLK_EH_RESET_TIMER;
+       }
+
+       abort_req->timeout = ADMIN_TIMEOUT;
+       abort_req->end_io_data = NULL;
+       blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 
        /*
         * The aborted req will be completed on receiving the abort req.
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 94b640c92792f2dddafb7a52430d51d58521de1f..4563ce91788daedf06ebcede05722649bfb7ce3b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -179,6 +179,12 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_free_request(struct request *rq);
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+
+enum {
+       BLK_MQ_REQ_NOWAIT       = (1 << 0), /* return when out of requests */
+       BLK_MQ_REQ_RESERVED     = (1 << 1), /* allocate from reserved pool */
+};
+
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                gfp_t gfp, bool reserved);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
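In this tree blk_mq_alloc_request() still takes a gfp_t and a reserved bool (declared just above), so the new BLK_MQ_REQ_* flags are only consumed by callers such as nvme_alloc_request() in the pci.c hunk. A hypothetical wrapper, included purely to illustrate what the two flags mean against the older signature (the NOWAIT-to-GFP_ATOMIC mapping is an assumption, not part of this patch):

        /* Hypothetical helper, for illustration only. */
        static inline struct request *
        blk_mq_alloc_request_flags(struct request_queue *q, int rw, unsigned int flags)
        {
                /* NOWAIT: atomic allocation so tag exhaustion fails fast instead of sleeping */
                gfp_t gfp = (flags & BLK_MQ_REQ_NOWAIT) ? GFP_ATOMIC : GFP_KERNEL;

                /* RESERVED: draw from the reserved tag pool kept for special requests */
                return blk_mq_alloc_request(q, rw, gfp, !!(flags & BLK_MQ_REQ_RESERVED));
        }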