nvme: avoid cqe corruption when update at the same time as read
author		Marta Rybczynska <mrybczyn@kalray.eu>
		Tue, 22 Mar 2016 15:02:06 +0000 (16:02 +0100)
committer	Chuck Anderson <chuck.anderson@oracle.com>
		Thu, 1 Jun 2017 20:41:10 +0000 (13:41 -0700)
Make sure the CQE phase (validity) bit is read before the rest of the
structure. The phase bit sits at the highest address of the CQE, and on
most platforms the CQE is read from lower to upper addresses using
multiple non-atomic loads. If the device updates the structure over PCI
while the processor is reading it, the processor may get a torn,
corrupted copy.
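
For reference, struct nvme_completion in kernels of that era looked
roughly like this; note that status, which carries the phase bit in
bit 0, sits at the highest offset and is therefore the last word a
low-to-high copy reads:

struct nvme_completion {
	__le32	result;		/* used by admin commands to return data */
	__u32	rsvd;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* phase bit in bit 0, written last */
};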

Adding the new nvme_cqe_valid() helper, which checks the validity bit,
also allows the other CQE read sequences to be refactored.
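
Outside the kernel, the resulting consumer pattern reduces to roughly
the sketch below. All names here are hypothetical, it is plain C11
rather than kernel code, the acquire fence stands in for ordering the
kernel obtains by other means, and a real consumer would also write
the doorbell register to return entries to the device:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct cqe {
	uint32_t result;
	uint16_t command_id;
	uint16_t status;		/* phase bit lives in bit 0 */
};

/* Look at the phase bit alone before touching the rest of the entry. */
static bool cqe_valid(const volatile struct cqe *ring, uint16_t head,
		      uint16_t phase)
{
	return (ring[head].status & 1) == phase;
}

/* Consume one entry if its phase matches the expected value; this
 * mirrors the loop shape the patch gives nvme_process_cq(). */
static bool consume_one(const volatile struct cqe *ring, uint16_t depth,
			uint16_t *head, uint16_t *phase, struct cqe *out)
{
	if (!cqe_valid(ring, *head, *phase))
		return false;
	atomic_thread_fence(memory_order_acquire);
	*out = ring[*head];		/* multi-word, non-atomic copy */
	if (++(*head) == depth) {	/* wrap, flip the expected phase */
		*head = 0;
		*phase = !*phase;
	}
	return true;
}

Because the phase bit is tested with a single naturally aligned 16-bit
load before anything else is touched, the multi-word copy only ever
runs on entries the device has finished writing, assuming the device
makes the phase bit visible last.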

Signed-off-by: Marta Rybczynska <marta.rybczynska@kalray.eu>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit d783e0bd02e700e7a893ef4fa71c69438ac1c276)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/pci.c

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e53b63a1b86e995e901592f2f8601dde1502eea6..f638f7603aa7715052f30e70e4ddcc9a33e44443 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -788,6 +788,13 @@ static void nvme_complete_rq(struct request *req)
        blk_mq_end_request(req, error);
 }
 
+/* We read the CQE phase first to check if the rest of the entry is valid */
+static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
+               u16 phase)
+{
+       return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
+}
+
 static int nvme_process_cq(struct nvme_queue *nvmeq)
 {
        u16 head, phase;
@@ -795,13 +802,10 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
        head = nvmeq->cq_head;
        phase = nvmeq->cq_phase;
 
-       for (;;) {
+       while (nvme_cqe_valid(nvmeq, head, phase)) {
                struct nvme_completion cqe = nvmeq->cqes[head];
-               u16 status = le16_to_cpu(cqe.status);
                struct request *req;
 
-               if ((status & 1) != phase)
-                       break;
                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;
@@ -829,7 +833,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
                if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
                        memcpy(req->special, &cqe, sizeof(cqe));
-               blk_mq_complete_request(req, status >> 1);
+               blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
 
        }
 
@@ -866,10 +870,9 @@ static irqreturn_t nvme_irq(int irq, void *data)
 static irqreturn_t nvme_irq_check(int irq, void *data)
 {
        struct nvme_queue *nvmeq = data;
-       struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
-       if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
-               return IRQ_NONE;
-       return IRQ_WAKE_THREAD;
+       if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+               return IRQ_WAKE_THREAD;
+       return IRQ_NONE;
 }
 
 static void nvme_async_event_work(struct work_struct *work)