nvme: don't poll the CQ from the kthread
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Mon, 29 Feb 2016 14:59:45 +0000 (15:59 +0100)
Commit:     Chuck Anderson <chuck.anderson@oracle.com>
CommitDate: Thu, 1 Jun 2017 20:41:03 +0000 (13:41 -0700)
There is no reason to do unconditional polling of CQs per the NVMe
spec.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 79f2b358c9ba373943a9284be2861fde58291c4e)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
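
For context: the unconditional poll was redundant because each completion queue is already reaped from its interrupt handler. A minimal sketch of that handler, approximately as it looked in this era of the driver (details such as the cqe_seen bookkeeping vary between versions):

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;

	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);		/* reap completions under the CQ lock */
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

With MSI/MSI-X the controller signals every new CQ entry, so the kthread's once-a-second sweep added lock traffic on each queue without ever being needed for forward progress.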
drivers/nvme/host/pci.c

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 65b85ae5a4cb11bb9fec26257ad3f7eb832144f1..b8f35a5272a9e19f51443e582195790a03903249 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1197,9 +1197,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->qid = qid;
        nvmeq->cq_vector = -1;
        dev->queues[qid] = nvmeq;
-
-       /* make sure queue descriptor is set before queue count, for kthread */
-       mb();
        dev->queue_count++;
 
        return nvmeq;
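
The mb() deleted above existed only to pair with the kthread's unlocked walk of the queue array, which the next hunk removes. A sketch of the two sides, reconstructed from the deleted lines in this diff:

	/* writer: nvme_alloc_queue(), before this patch */
	dev->queues[qid] = nvmeq;	/* publish the pointer ...           */
	mb();				/* ... strictly before the count ... */
	dev->queue_count++;		/* ... that bounds the reader's loop */

	/* reader: nvme_kthread(), before this patch */
	for (i = 0; i < dev->queue_count; i++) {
		struct nvme_queue *nvmeq = dev->queues[i];

		if (!nvmeq)
			continue;
		spin_lock_irq(&nvmeq->q_lock);
		nvme_process_cq(nvmeq);
		spin_unlock_irq(&nvmeq->q_lock);
	}

Once the reader is gone, the store ordering no longer matters and the barrier can go with it.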
@@ -1385,7 +1382,6 @@ static int nvme_kthread(void *data)
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
-                       int i;
                        u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
                        /*
@@ -1403,14 +1399,6 @@ static int nvme_kthread(void *data)
                                }
                                continue;
                        }
-                       for (i = 0; i < dev->queue_count; i++) {
-                               struct nvme_queue *nvmeq = dev->queues[i];
-                               if (!nvmeq)
-                                       continue;
-                               spin_lock_irq(&nvmeq->q_lock);
-                               nvme_process_cq(nvmeq);
-                               spin_unlock_irq(&nvmeq->q_lock);
-                       }
                }
                spin_unlock(&dev_list_lock);
                schedule_timeout(round_jiffies_relative(HZ));
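
After this patch the loop body reduces to the health check visible in the context above: read NVME_REG_CSTS once per second and schedule recovery on a fatal status, with no per-queue work. A condensed sketch, with the reset scheduling and the additional status checks elided:

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev, *next;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&dev_list_lock);
		list_for_each_entry_safe(dev, next, &dev_list, node) {
			u32 csts = readl(dev->bar + NVME_REG_CSTS);

			if (csts & NVME_CSTS_CFS) {
				/* controller fatal status: the elided
				 * context queues reset work here */
			}
		}
		spin_unlock(&dev_list_lock);
		schedule_timeout(round_jiffies_relative(HZ));
	}
	return 0;
}

Later mainline work dropped this kthread entirely in favor of a per-device watchdog timer.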