www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
nvme: refactor set_queue_count
author: Christoph Hellwig <hch@lst.de>
Tue, 20 Dec 2016 00:07:25 +0000 (16:07 -0800)
committer: Chuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:40:41 +0000 (13:40 -0700)
Split out a helper that just issues the Set Features and interprets the
result which can go to common code, and document why we are ignoring
non-timeout error returns in the PCIe driver.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 9a0be7abb62ff2a7dc3360ab45c31f29b3faf642)

Orabug: 25130845
Conflicts:
drivers/nvme/host/pci.c

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c

index f8f6a8636d0cde89874d2d46df9481776476f74c..40f1c9aa32bfc36403b4c776d0b46bfe13d25633 100644 (file)
@@ -324,6 +324,22 @@ int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
        return error;
 }
 
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
+{
+       u32 q_count = (*count - 1) | ((*count - 1) << 16);
+       u32 result;
+       int status, nr_io_queues;
+
+       status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
+                       &result);
+       if (status)
+               return status;
+
+       nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+       *count = min(*count, nr_io_queues);
+       return 0;
+}
+
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
        struct nvme_user_io io;
index 5fe77e07ac02ad67cd20e99c2e5711df30579666..935b4f27b02e0bfa3a9fc7e7fa9f48a1a1160f27 100644 (file)
@@ -231,6 +231,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                        dma_addr_t dma_addr, u32 *result);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                        dma_addr_t dma_addr, u32 *result);
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 
 extern spinlock_t dev_list_lock;
 
index 6819f128fde657d377e910677462e81f3a6ed01f..d04b7d67efd39fc2ebf78af76edefc1cd2c21a87 100644 (file)
@@ -1580,23 +1580,6 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
                }
 }
 
-static int set_queue_count(struct nvme_dev *dev, int count)
-{
-       int status;
-       u32 result;
-       u32 q_count = (count - 1) | ((count - 1) << 16);
-
-       status = nvme_set_features(&dev->ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
-                                                               &result);
-       if (status < 0)
-               return status;
-       if (status > 0) {
-               dev_err(dev->dev, "Could not set queue count (%d)\n", status);
-               return 0;
-       }
-       return min(result & 0xffff, result >> 16) + 1;
-}
-
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 {
        u64 szu, size, offset;
@@ -1661,11 +1644,20 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        int result, i, vecs, nr_io_queues, size;
 
        nr_io_queues = min(nvme_io_queues, num_possible_cpus());
-       result = set_queue_count(dev, nr_io_queues);
-       if (result <= 0)
+       result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
+       if (result < 0)
                return result;
-       if (result < nr_io_queues)
-               nr_io_queues = result;
+
+       /*
+        * Degraded controllers might return an error when setting the queue
+        * count.  We still want to be able to bring them online and offer
+        * access to the admin queue, as that might be only way to fix them up.
+        */
+       if (result > 0) {
+               dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+               nr_io_queues = 0;
+               result = 0;
+       }
 
        if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
                result = nvme_cmb_qdepth(dev, nr_io_queues,