www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
NVMe: Set affinity after allocating request queues
authorKeith Busch <keith.busch@intel.com>
Sun, 18 Dec 2016 03:03:47 +0000 (19:03 -0800)
committerChuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:40:28 +0000 (13:40 -0700)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The asynchronous namespace scanning caused affinity hints to be set before
its tagset was initialized, so there was no cpu mask to set the hint. This
patch moves the affinity hint setting to after namespaces are scanned.

Reported-by: 김경산 <ks0204.kim@samsung.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit bda4e0fb3126aca15586d165b5a15a37edc0a984)

Orabug: 25130845
Conflicts:
        Manually patched the commit.
        drivers/block/nvme-core.c

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/block/nvme-core.c

index 360526b19569270dfa92b48ea2a87b3c14425606..bc4bfeccede6e5bbba37e72540267ce9d0830c4b 100644 (file)
@@ -894,7 +894,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
         */
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
-                                       req->cmd_type != REQ_TYPE_DRV_PRIV) {
+                                       req->cmd_type != REQ_TYPE_SPECIAL) {
                        req->errors = -EFAULT;
                        blk_mq_complete_request(req);
                        return BLK_MQ_RQ_QUEUE_OK;
@@ -2523,6 +2523,22 @@ done:
        kfree(id);
 }
 
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+       struct nvme_queue *nvmeq;
+       int i;
+
+       for (i = 0; i < dev->online_queues; i++) {
+               nvmeq = dev->queues[i];
+
+               if (!nvmeq->tags || !(*nvmeq->tags))
+                       continue;
+
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                       blk_mq_tags_cpumask(*nvmeq->tags));
+       }
+}
+
 static void nvme_dev_scan(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2530,6 +2546,8 @@ static void nvme_dev_scan(struct work_struct *work)
        if (!dev->tagset.tags)
                return;
        nvme_scan_namespaces(dev);
+
+       nvme_set_irq_hints(dev);
 }
 
 /*
@@ -3062,22 +3080,6 @@ static const struct file_operations nvme_dev_fops = {
        .compat_ioctl   = nvme_dev_ioctl,
 };
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
-       struct nvme_queue *nvmeq;
-       int i;
-
-       for (i = 0; i < dev->online_queues; i++) {
-               nvmeq = dev->queues[i];
-
-               if (!nvmeq->tags || !(*nvmeq->tags))
-                       continue;
-
-               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-                                       blk_mq_tags_cpumask(*nvmeq->tags));
-       }
-}
-
 static void nvme_probe_work(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
@@ -3120,8 +3122,6 @@ static void nvme_probe_work(struct work_struct *work)
        if (result)
                goto free_tags;
 
-       nvme_set_irq_hints(dev);
-
        dev->event_limit = 1;
 
        /*