nvme: synchronize access to ctrl->namespaces
author     Christoph Hellwig <hch@lst.de>
           Thu, 24 Dec 2015 14:27:00 +0000 (15:27 +0100)
committer  Chuck Anderson <chuck.anderson@oracle.com>
           Thu, 1 Jun 2017 20:40:52 +0000 (13:40 -0700)
Currently traversal and modification of ctrl->namespaces happen completely
unsynchronized, which can be fixed by the addition of a simple mutex (a
minimal sketch of the resulting locking pattern follows the sign-off block).

Note: nvme_dev_ioctl will be handled in the next patch.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 69d3b8ac15a5eb938e6a01909f6cc8ae4b5d3a17)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
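
The pattern the patch introduces is a single coarse-grained lock: every
external path that walks or modifies ctrl->namespaces takes
ctrl->namespaces_mutex, and the internal helpers (nvme_find_ns,
nvme_alloc_ns, nvme_ns_remove, __nvme_scan_namespaces) assert that the
caller already holds it. Below is a minimal userspace sketch of the same
pattern; the names and types are simplified stand-ins, not the actual
driver code.

#include <pthread.h>
#include <stddef.h>

/*
 * Userspace sketch of the locking scheme this patch introduces.
 * Simplified stand-ins for the kernel's list_head and struct mutex.
 */
struct ns {
	unsigned int id;
	struct ns *next;
};

struct ctrl {
	struct ns *namespaces;            /* head of the namespace list */
	pthread_mutex_t namespaces_mutex; /* serializes all list access;
					   * init with pthread_mutex_init()
					   * or PTHREAD_MUTEX_INITIALIZER */
};

/* Internal helper: the caller must hold namespaces_mutex.  The kernel
 * version documents and checks this with lockdep_assert_held(). */
static struct ns *find_ns(struct ctrl *ctrl, unsigned int id)
{
	struct ns *n;

	for (n = ctrl->namespaces; n; n = n->next)
		if (n->id == id)
			return n;
	return NULL;
}

/* External entry point: take the lock, then use the helper. */
struct ns *lookup_ns(struct ctrl *ctrl, unsigned int id)
{
	struct ns *n;

	pthread_mutex_lock(&ctrl->namespaces_mutex);
	n = find_ns(ctrl, id);
	pthread_mutex_unlock(&ctrl->namespaces_mutex);
	return n;
}

Without the mutex, a rescan inserting into the list can race with
nvme_remove_namespaces deleting from it, so a traverser may follow a
just-freed next pointer; the lock serializes both sides.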
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c88afd2de9af7a00c21f473478af12c67985e05b..622c57e42ac6a29fdb69ffea9182d2d5caeee21f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -984,6 +984,8 @@ static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
        struct nvme_ns *ns;
 
+       lockdep_assert_held(&ctrl->namespaces_mutex);
+
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                if (ns->ns_id == nsid)
                        return ns;
@@ -999,6 +1001,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        struct gendisk *disk;
        int node = dev_to_node(ctrl->dev);
 
+       lockdep_assert_held(&ctrl->namespaces_mutex);
+
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
                return;
@@ -1065,6 +1069,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        bool kill = nvme_io_incapable(ns->ctrl) &&
                        !blk_queue_dying(ns->queue);
 
+       lockdep_assert_held(&ns->ctrl->namespaces_mutex);
+
        if (kill)
                blk_set_queue_dying(ns->queue);
        if (ns->disk->flags & GENHD_FL_UP) {
@@ -1087,6 +1093,8 @@ static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
        struct nvme_ns *ns, *next;
        unsigned i;
 
+       lockdep_assert_held(&ctrl->namespaces_mutex);
+
        for (i = 1; i <= nn; i++) {
                ns = nvme_find_ns(ctrl, i);
                if (ns) {
@@ -1108,7 +1116,11 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
 
        if (nvme_identify_ctrl(ctrl, &id))
                return;
+
+       mutex_lock(&ctrl->namespaces_mutex);
        __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+       mutex_unlock(&ctrl->namespaces_mutex);
+
        kfree(id);
 }
 
@@ -1116,8 +1128,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns, *next;
 
+       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 
 static DEFINE_IDA(nvme_instance_ida);
@@ -1185,6 +1199,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        int ret;
 
        INIT_LIST_HEAD(&ctrl->namespaces);
+       mutex_init(&ctrl->namespaces_mutex);
        kref_init(&ctrl->kref);
        ctrl->dev = dev;
        ctrl->ops = ops;
@@ -1227,6 +1242,7 @@ void nvme_freeze_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
+       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                blk_mq_freeze_queue_start(ns->queue);
 
@@ -1237,18 +1253,21 @@ void nvme_freeze_queues(struct nvme_ctrl *ctrl)
                blk_mq_cancel_requeue_work(ns->queue);
                blk_mq_stop_hw_queues(ns->queue);
        }
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 
 void nvme_unfreeze_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
+       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
                blk_mq_unfreeze_queue(ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
                blk_mq_kick_requeue_list(ns->queue);
        }
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 
 int __init nvme_core_init(void)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8b5000d443000628f0dce95eecf0cb59aff1ad0b..4a19c6454eb7c2496c2c4b00cc5b8e85b7ec40f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -63,6 +63,7 @@ struct nvme_ctrl {
        int instance;
        struct blk_mq_tag_set *tagset;
        struct list_head namespaces;
+       struct mutex namespaces_mutex;
        struct device *device;  /* char device */
        struct list_head node;
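
For reference, the lockdep_assert_held() calls added to the internal
helpers compile away unless lock debugging (CONFIG_LOCKDEP) is enabled,
in which case they warn whenever the calling context does not hold the
lock. A rough userspace analogue of the idea follows; it is a sketch of
the concept only, not the kernel implementation, which lives in
include/linux/lockdep.h and is far more general.

#include <assert.h>
#include <pthread.h>

/*
 * Rough analogue of lockdep_assert_held(): a debug-only check that the
 * current thread really holds the mutex.  Like lockdep, it costs
 * nothing in non-debug builds (assert() is compiled out under NDEBUG).
 */
struct checked_mutex {
	pthread_mutex_t m;
	pthread_t owner;  /* valid only while held == 1 */
	int held;
};

static void checked_lock(struct checked_mutex *cm)
{
	pthread_mutex_lock(&cm->m);
	cm->owner = pthread_self();
	cm->held = 1;
}

static void checked_unlock(struct checked_mutex *cm)
{
	cm->held = 0;
	pthread_mutex_unlock(&cm->m);
}

/* Analogue of lockdep_assert_held(&cm->m): fail loudly in debug builds
 * if the current thread does not hold the mutex. */
static void assert_held(struct checked_mutex *cm)
{
	assert(cm->held && pthread_equal(cm->owner, pthread_self()));
}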