nvme: only add a controller to dev_list after it's been fully initialized
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Thu, 22 Oct 2015 12:03:33 +0000 (14:03 +0200)
Commit:     Chuck Anderson <chuck.anderson@oracle.com>
CommitDate: Thu, 1 Jun 2017 20:40:43 +0000 (13:40 -0700)
Without this we can easily get bad dereferences on nvmeq->q_db when the nvme
kthread tries to poll the CQs for controllers that are in a half-initialized
state.
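
The window exists because the polling thread walks dev_list and rings each
queue's completion doorbell through nvmeq->q_db without checking how far
controller setup has progressed. A minimal sketch of that polling pattern,
condensed from the driver of this era (helper and field names assumed from
the pre-4.4 code, not quoted verbatim):

	static int nvme_kthread(void *data)
	{
		struct nvme_dev *dev;

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_lock(&dev_list_lock);
			list_for_each_entry(dev, &dev_list, node) {
				int i;

				for (i = 0; i < dev->queue_count; i++) {
					struct nvme_queue *nvmeq = dev->queues[i];

					if (!nvmeq)
						continue;
					spin_lock_irq(&nvmeq->q_lock);
					/*
					 * Reaping completions writes the new CQ head
					 * back through nvmeq->q_db.  If the device was
					 * put on dev_list before its queues were set
					 * up, q_db may not point at mapped doorbell
					 * registers yet, hence the bad dereference.
					 */
					nvme_process_cq(nvmeq);
					spin_unlock_irq(&nvmeq->q_lock);
				}
			}
			spin_unlock(&dev_list_lock);
			schedule_timeout(round_jiffies_relative(HZ));
		}
		return 0;
	}

Deferring list_add() until after queue initialization (and taking the
controller back off the list first on teardown) keeps this loop from ever
seeing a half-built device.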

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 7385014c073263b077442439299fad013edd4409)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/pci.c

index 79d9ebc05006b96e7f40fa7fd4f55114ba2c9439..82b8bf86b06b89b222ce011fb8400783d955c8d5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2028,6 +2028,30 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
        kthread_stop(kworker_task);
 }
 
+static int nvme_dev_list_add(struct nvme_dev *dev)
+{
+       bool start_thread = false;
+
+       spin_lock(&dev_list_lock);
+       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+               start_thread = true;
+               nvme_thread = NULL;
+       }
+       list_add(&dev->node, &dev_list);
+       spin_unlock(&dev_list_lock);
+
+       if (start_thread) {
+               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+               wake_up_all(&nvme_kthread_wait);
+       } else
+               wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+       if (IS_ERR_OR_NULL(nvme_thread))
+               return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+
+       return 0;
+}
+
 /*
 * Remove the node from the device list and check
 * for whether or not we need to stop the nvme_thread.
@@ -2147,7 +2171,6 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 static void nvme_probe_work(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
-       bool start_thread = false;
        int result;
 
        result = nvme_dev_map(dev);
@@ -2158,25 +2181,6 @@ static void nvme_probe_work(struct work_struct *work)
        if (result)
                goto unmap;
 
-       spin_lock(&dev_list_lock);
-       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
-               start_thread = true;
-               nvme_thread = NULL;
-       }
-       list_add(&dev->node, &dev_list);
-       spin_unlock(&dev_list_lock);
-
-       if (start_thread) {
-               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-               wake_up_all(&nvme_kthread_wait);
-       } else
-               wait_event_killable(nvme_kthread_wait, nvme_thread);
-
-       if (IS_ERR_OR_NULL(nvme_thread)) {
-               result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-               goto disable;
-       }
-
        nvme_init_queue(dev->queues[0], 0);
        result = nvme_alloc_admin_tags(dev);
        if (result)
@@ -2192,6 +2196,10 @@ static void nvme_probe_work(struct work_struct *work)
 
        dev->ctrl.event_limit = 1;
 
+       result = nvme_dev_list_add(dev);
+       if (result)
+               goto remove;
+
        /*
         * Keep the controller around but remove all namespaces if we don't have
         * any working I/O queue.
@@ -2206,13 +2214,14 @@ static void nvme_probe_work(struct work_struct *work)
 
        return;
 
+ remove:
+       nvme_dev_list_remove(dev);
  free_tags:
        nvme_dev_remove_admin(dev);
        blk_put_queue(dev->ctrl.admin_q);
        dev->ctrl.admin_q = NULL;
  disable:
        nvme_disable_queue(dev, 0);
-       nvme_dev_list_remove(dev);
  unmap:
        nvme_dev_unmap(dev);
  out: