nvme: special case AEN requests
author    Christoph Hellwig <hch@lst.de>
          Thu, 22 Dec 2016 06:59:20 +0000 (22:59 -0800)
committer Chuck Anderson <chuck.anderson@oracle.com>
          Thu, 1 Jun 2017 20:40:49 +0000 (13:40 -0700)
AEN requests are different from other requests in that they don't time out
and can't easily be cancelled.  Because of that we should not use the blk-mq
infrastructure for them, but just special case them in the completion path;
a minimal sketch of the resulting command-id split follows the sign-off tags
below.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 3e1e21c7bfcfa9bf06c07f48a13faca2f62b3339)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
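
A minimal, userspace-only C sketch of the command-id split this patch
introduces, under simplified assumptions; it is not driver code.  The three
#defines mirror the constants added to drivers/nvme/host/pci.c, while
struct fake_completion, process_admin_cqe() and the two handlers are
hypothetical stand-ins for struct nvme_completion, nvme_process_cq() and the
real completion paths.

/* Sketch only: models the AEN/blk-mq command-id split, not the driver. */
#include <stdint.h>
#include <stdio.h>

#define NVME_AQ_DEPTH		256
#define NVME_NR_AEN_COMMANDS	1
#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

/* Stand-in for struct nvme_completion: only the command id matters here. */
struct fake_completion {
	uint16_t command_id;
};

/* Command ids below NVME_AQ_BLKMQ_DEPTH come from blk-mq tags ... */
static void complete_blk_mq_request(struct fake_completion *cqe)
{
	printf("command_id %u -> normal blk-mq request completion\n",
	       (unsigned)cqe->command_id);
}

/* ... while the ids reserved at the top of the admin queue are AENs. */
static void complete_async_event(struct fake_completion *cqe)
{
	printf("command_id %u -> async event notification\n",
	       (unsigned)cqe->command_id);
}

/* Mirrors the routing the patch adds to nvme_process_cq() for qid 0. */
static void process_admin_cqe(struct fake_completion *cqe)
{
	if (cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)
		complete_async_event(cqe);
	else
		complete_blk_mq_request(cqe);
}

int main(void)
{
	/* An ordinary admin tag, and the single reserved AEN id (255). */
	struct fake_completion io  = { .command_id = 42 };
	struct fake_completion aen = {
		.command_id = NVME_AQ_BLKMQ_DEPTH + NVME_NR_AEN_COMMANDS - 1,
	};

	process_admin_cqe(&io);
	process_admin_cqe(&aen);
	return 0;
}

With the default NVME_AQ_DEPTH of 256 and a single reserved AEN command,
blk-mq is told the admin queue is only 255 entries deep and the one
outstanding AEN is submitted with command_id 255; that is also why, further
down in the diff, the admin tagset's reserved_tags entry goes away and its
queue_depth drops to NVME_AQ_BLKMQ_DEPTH.
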
drivers/nvme/host/pci.c

index aa19cbd6d69a891c0d92782852f79d83e595556a..1a2f430647d69abeaa06295507480dacb3f353a4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
 #define NVME_AQ_DEPTH          256
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
+               
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_NR_AEN_COMMANDS   1
+#define NVME_AQ_BLKMQ_DEPTH    (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
 
 unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -364,23 +371,23 @@ static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
        return ctx;
 }
 
-static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
-                                               struct nvme_completion *cqe)
+static void nvme_complete_async_event(struct nvme_dev *dev,
+               struct nvme_completion *cqe)
 {
-       u32 result = le32_to_cpup(&cqe->result);
-       u16 status = le16_to_cpup(&cqe->status) >> 1;
+       u16 status = le16_to_cpu(cqe->status) >> 1;
+       u32 result = le32_to_cpu(cqe->result);
 
        if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
-               ++nvmeq->dev->ctrl.event_limit;
+               ++dev->ctrl.event_limit;
        if (status != NVME_SC_SUCCESS)
                return;
 
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
-               dev_info(nvmeq->q_dmadev, "rescanning\n");
-               queue_work(nvme_workq, &nvmeq->dev->scan_work);
+               dev_info(dev->dev, "rescanning\n");
+               queue_work(nvme_workq, &dev->scan_work);
        default:
-               dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result);
+               dev_warn(dev->dev, "async event result %08x\n", result);
        }
 }
 
@@ -437,7 +444,7 @@ static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
 }
 
 /**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
  * @cmd: The command to send
  *
@@ -933,13 +940,28 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
                void *ctx;
                nvme_completion_fn fn;
                struct nvme_completion cqe = nvmeq->cqes[head];
-               if ((le16_to_cpu(cqe.status) & 1) != phase)
+               u16 status = le16_to_cpu(cqe.status);
+
+               if ((status & 1) != phase)
                        break;
                nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;
                }
+
+               /*
+                * AEN requests are special as they don't time out and can
+                * survive any kind of queue freeze and often don't respond to
+                * aborts.  We don't even bother to allocate a struct request
+                * for them but rather special case them here.
+                */
+               if (unlikely(nvmeq->qid == 0 &&
+                               cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+                       nvme_complete_async_event(nvmeq->dev, &cqe);
+                       continue;
+               }
+
                ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
                fn(nvmeq, ctx, &cqe);
        }
@@ -983,28 +1005,15 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
        return IRQ_WAKE_THREAD;
 }
 
-static int nvme_submit_async_admin_req(struct nvme_dev *dev)
+static void nvme_submit_async_event(struct nvme_dev *dev)
 {
-       struct nvme_queue *nvmeq = dev->queues[0];
        struct nvme_command c;
-       struct nvme_cmd_info *cmd_info;
-       struct request *req;
-
-       req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, GFP_ATOMIC, true);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       req->cmd_flags |= REQ_NO_TIMEOUT;
-       cmd_info = blk_mq_rq_to_pdu(req);
-       nvme_set_info(cmd_info, NULL, async_req_completion);
 
        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_admin_async_event;
-       c.common.command_id = req->tag;
+       c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
 
-       blk_mq_free_request(req);
-       __nvme_submit_cmd(nvmeq, &c);
-       return 0;
+       __nvme_submit_cmd(dev->queues[0], &c);
 }
 
 static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
@@ -1143,7 +1152,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
        abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, GFP_ATOMIC,
                                                                        false);
-       if (IS_ERR(abort_req))
+       if (IS_ERR(abort_req)) {
                atomic_inc(&dev->ctrl.abort_limit);
                return BLK_EH_RESET_TIMER;
        }
@@ -1444,8 +1453,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
        if (!dev->ctrl.admin_q) {
                dev->admin_tagset.ops = &nvme_mq_admin_ops;
                dev->admin_tagset.nr_hw_queues = 1;
-               dev->admin_tagset.queue_depth = NVME_AQ_DEPTH;
-               dev->admin_tagset.reserved_tags = 1;
+               dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH;
                dev->admin_tagset.timeout = ADMIN_TIMEOUT;
                dev->admin_tagset.numa_node = dev_to_node(dev->dev);
                dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
@@ -1553,11 +1561,8 @@ static int nvme_kthread(void *data)
                                spin_lock_irq(&nvmeq->q_lock);
                                nvme_process_cq(nvmeq);
 
-                               while (i == 0 && dev->ctrl.event_limit > 0) {
-                                       if (nvme_submit_async_admin_req(dev))
-                                               break;
-                                       dev->ctrl.event_limit--;
-                               }
+                               while (i == 0 && dev->ctrl.event_limit > 0)
+                                       nvme_submit_async_event(dev);
                                spin_unlock_irq(&nvmeq->q_lock);
                        }
                }
@@ -2217,7 +2222,7 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto free_tags;
 
-       dev->ctrl.event_limit = 1;
+       dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
 
        result = nvme_dev_list_add(dev);
        if (result)