nvme: Use blk-mq helper for IO termination
author	Sagi Grimberg <sagig@mellanox.com>
Tue, 12 Apr 2016 21:07:15 +0000 (15:07 -0600)
committer	Chuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:41:14 +0000 (13:41 -0700)
blk-mq offers a tagset iterator, so use that instead of the
driver-local nvme_clear_queue() helper.

Note, nvme_cancel_queue_ios is renamed to nvme_cancel_io, as there
is no longer any concept of a queue in this function (the debug
print also loses the queue ID).
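
For context, a minimal sketch of the tagset-iterator pattern the patch
switches to is shown below. The names example_dev, example_cancel and
example_teardown are illustrative only (the real callback is
nvme_cancel_io in the diff), and the sketch assumes the two-argument
blk_mq_complete_request(req, error) form used by this tree.

/*
 * Illustrative sketch only: example_* names stand in for the NVMe
 * driver structures touched by this patch.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/errno.h>

struct example_dev {
	struct blk_mq_tag_set tagset;		/* I/O queues */
	struct blk_mq_tag_set admin_tagset;	/* admin queue */
};

/* Prototype matches the callback expected by blk_mq_tagset_busy_iter(). */
static void example_cancel(struct request *req, void *data, bool reserved)
{
	/*
	 * The iterator visits every allocated tag; skip requests that
	 * were never started, as nvme_cancel_io does.
	 */
	if (!blk_mq_request_started(req))
		return;

	/*
	 * Complete the request with an error; the patch passes an NVMe
	 * abort status (NVME_SC_ABORT_REQ) here rather than -EIO.
	 */
	blk_mq_complete_request(req, -EIO);
}

/*
 * On teardown, one call per tag set replaces the old loop over
 * nvme_clear_queue() for each hardware queue.
 */
static void example_teardown(struct example_dev *dev)
{
	blk_mq_tagset_busy_iter(&dev->tagset, example_cancel, dev);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, example_cancel, dev);
}

Because blk_mq_tagset_busy_iter() walks the tag set itself, cancellation
no longer needs the per-queue q_lock that the removed nvme_clear_queue()
had to take.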

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 82b4552b91c40626a90a20291aab1137c638b512)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/pci.c

index 7cdedeb479ad83e9623d81882cdb97f97de007d2..80e28468eebeff601b79af149c5c905ff7920aca 100644 (file)
@@ -1007,16 +1007,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
 {
-       struct nvme_queue *nvmeq = data;
+       struct nvme_dev *dev = data;
        int status;
 
        if (!blk_mq_request_started(req))
                return;
 
-       dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
-                "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+       dev_dbg_ratelimited(dev->ctrl.device, "Cancelling I/O %d", req->tag);
 
        status = NVME_SC_ABORT_REQ;
        if (blk_queue_dying(req->q))
@@ -1073,14 +1072,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
        return 0;
 }
 
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
-       spin_lock_irq(&nvmeq->q_lock);
-       if (nvmeq->tags && *nvmeq->tags)
-               blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
-       spin_unlock_irq(&nvmeq->q_lock);
-}
-
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
        struct nvme_queue *nvmeq = dev->queues[0];
@@ -1820,8 +1811,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        }
        nvme_dev_unmap(dev);
 
-       for (i = dev->queue_count - 1; i >= 0; i--)
-               nvme_clear_queue(dev->queues[i]);
+       blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+       blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
        mutex_unlock(&dev->shutdown_lock);
 }