www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
nvme: move namespace scanning to core
authorChristoph Hellwig <hch@lst.de>
Fri, 30 Dec 2016 21:10:00 +0000 (13:10 -0800)
committerChuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:41:20 +0000 (13:41 -0700)
Move the scan work item and surrounding code to the common code.  For now
we need a new finish_scan method to allow the PCI driver to set the
irq affinity hints, but I have plans in the works to obsolete this as well.

Note that this moves the namespace scanning from nvme_wq to the system
workqueue, but as we don't rely on namespace scanning to finish from reset
or I/O this should be fine.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jon Derrick <jonathan.derrick@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 5955be2144b3b56182e2175e7e3d2ddf27fb485d)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c

index a73f001dec205298e35684ebbeaa11ce0f5d6fd1..07a6bcd0d3af41d2795efb407b01d4f4ab38a218 100644 (file)
@@ -1415,7 +1415,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        nvme_put_ns(ns);
 }
 
-static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
 {
        struct nvme_ns *ns, *next;
        unsigned i;
@@ -1437,20 +1437,40 @@ static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
 }
 
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
+static void nvme_scan_work(struct work_struct *work)
 {
+       struct nvme_ctrl *ctrl =
+               container_of(work, struct nvme_ctrl, scan_work);
        struct nvme_id_ctrl *id;
+       unsigned nn;
+
+       if (ctrl->state != NVME_CTRL_LIVE)
+               return;
 
        if (nvme_identify_ctrl(ctrl, &id))
                return;
        
+       nn = le32_to_cpu(id->nn);
        mutex_lock(&ctrl->namespaces_mutex);
-       __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+       nvme_scan_ns_sequential(ctrl, nn);
        mutex_unlock(&ctrl->namespaces_mutex);
 
        kfree(id);
+
+       if (ctrl->ops->post_scan)
+               ctrl->ops->post_scan(ctrl);
 }
-EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
+
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+       /*
+        * Do not queue new scan work when a controller is reset during
+        * removal.
+        */
+       if (ctrl->state == NVME_CTRL_LIVE)
+               schedule_work(&ctrl->scan_work);
+}
+EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
@@ -1492,6 +1512,9 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
 
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 {
+       flush_work(&ctrl->scan_work);
+       nvme_remove_namespaces(ctrl);
+
        device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
 
        spin_lock(&dev_list_lock);
@@ -1535,6 +1558,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        ctrl->dev = dev;
        ctrl->ops = ops;
        ctrl->quirks = quirks;
+       INIT_WORK(&ctrl->scan_work, nvme_scan_work);
 
        ret = nvme_set_instance(ctrl);
        if (ret)
index becf2134f23f6105c3245dd849cc80132bbabcdb..33e0195297b6dbba07327bfaf9c4aff990ba8a07 100644 (file)
@@ -96,6 +96,7 @@ struct nvme_ctrl {
        u32 vs;
        bool subsystem;
        unsigned long quirks;
+       struct work_struct scan_work;
 };
 
 /*
@@ -134,6 +135,7 @@ struct nvme_ctrl_ops {
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        int (*reset_ctrl)(struct nvme_ctrl *ctrl);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
+       void (*post_scan)(struct nvme_ctrl *ctrl);
 };
 
 static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -194,7 +196,7 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_put_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_identify(struct nvme_ctrl *ctrl);
 
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
index 8c027c5c08ccb6b92714f685cd06c4c9a6f2f5a7..0306ec5a2855b4e56c63c868e5f4d08bd0ba4617 100644 (file)
@@ -105,7 +105,6 @@ struct nvme_dev {
        void __iomem *bar;
        struct mutex namespaces_mutex;
        struct work_struct reset_work;
-       struct work_struct scan_work;
        struct work_struct remove_work;
        struct work_struct async_work;
        struct timer_list watchdog_timer;
@@ -279,16 +278,6 @@ static int nvme_init_request(void *data, struct request *req,
        return 0;
 }
 
-static void nvme_queue_scan(struct nvme_dev *dev)
-{
-       /*
-        * Do not queue new scan work when a controller is reset during
-        * removal.
-        */
-       if (dev->ctrl.state == NVME_CTRL_LIVE)
-               queue_work(nvme_workq, &dev->scan_work);
-}
-
 static void nvme_complete_async_event(struct nvme_dev *dev,
                struct nvme_completion *cqe)
 {
@@ -306,7 +295,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
                dev_info(dev->ctrl.device, "rescanning\n");
-               nvme_queue_scan(dev);
+               nvme_queue_scan(&dev->ctrl);
        default:
                dev_warn(dev->ctrl.device, "async event result %08x\n", result);
        }
@@ -1561,31 +1550,21 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        return result;
 }
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
 {
-       struct nvme_queue *nvmeq;
-       int i;
-
-       for (i = 0; i < dev->online_queues; i++) {
-               nvmeq = dev->queues[i];
-
-               if (!nvmeq->tags || !(*nvmeq->tags))
-                       continue;
-
-               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-                                       blk_mq_tags_cpumask(*nvmeq->tags));
-       }
-}
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
+       struct nvme_queue *nvmeq;
+       int i;
 
-static void nvme_dev_scan(struct work_struct *work)
-{
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
+       for (i = 0; i < dev->online_queues; i++) {
+               nvmeq = dev->queues[i];
 
-       if (!dev->tagset.tags)
-               return;
-       nvme_scan_namespaces(dev);
+               if (!nvmeq->tags || !(*nvmeq->tags))
+                       continue;
 
-       nvme_set_irq_hints(dev);
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                       blk_mq_tags_cpumask(*nvmeq->tags));
+       }
 }
 
 static void nvme_del_queue_end(struct request *req, int error)
@@ -1941,7 +1920,7 @@ static void nvme_reset_work(struct work_struct *work)
        }
 
        if (dev->online_queues > 1)
-               nvme_queue_scan(dev);
+               nvme_queue_scan(&dev->ctrl);
        return;
 
  out:
@@ -2009,6 +1988,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .reg_read64             = nvme_pci_reg_read64,
        .reset_ctrl             = nvme_pci_reset_ctrl,
        .free_ctrl              = nvme_pci_free_ctrl,
+       .post_scan              = nvme_pci_post_scan,
 };
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2038,7 +2018,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
 
-       INIT_WORK(&dev->scan_work, nvme_dev_scan);
        INIT_WORK(&dev->reset_work, nvme_reset_work);
        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
        INIT_WORK(&dev->async_work, nvme_async_event_work);
@@ -2103,9 +2082,6 @@ static void nvme_remove(struct pci_dev *pdev)
 
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->async_work);
-       flush_work(&dev->reset_work);
-       flush_work(&dev->scan_work);
-       nvme_remove_namespaces(&dev->ctrl);
        nvme_uninit_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, true);
        flush_work(&dev->reset_work);