From 4369f33dfdd50a5011922d45830e2b69ba4067ce Mon Sep 17 00:00:00 2001
From: Marta Rybczynska
Date: Sun, 18 Dec 2016 10:21:19 -0800
Subject: [PATCH] nvme: avoid cqe corruption when update at the same time as
 read

Make sure the CQE phase (validity) is read before the rest of the
structure. The phase bit is the highest address and the CQE read will
happen on most platforms from lower to upper addresses and will be done
by multiple non-atomic loads. If the structure is updated by PCI during
the reads from the processor, the processor may get a corrupted copy.

The addition of the new nvme_cqe_valid function that verifies the
validity bit also allows refactoring of the other CQE read sequences.

Signed-off-by: Marta Rybczynska
Reviewed-by: Johannes Thumshirn
Reviewed-by: Christoph Hellwig
Reviewed-by: Keith Busch
Signed-off-by: Jens Axboe
(cherry picked from commit d783e0bd02e700e7a893ef4fa71c69438ac1c276)

Orabug: 24960824

Conflicts:
	nvme_poll() function is not available in UEK4QU2. Resolved the
	conflicts around the nvme_poll() function.

Signed-off-by: Ashok Vairavan
Reviewed-by: Martin K. Petersen
Signed-off-by: Dhaval Giani
---
 drivers/nvme/host/pci.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9377f385b8e6..3881e96d87b2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -941,6 +941,13 @@ out:
 	return ret;
 }
 
+/* We read the CQE phase first to check if the rest of the entry is valid */
+static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
+		u16 phase)
+{
+	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
+}
+
 static int nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	u16 head, phase;
@@ -948,12 +955,10 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 	head = nvmeq->cq_head;
 	phase = nvmeq->cq_phase;
 
-	for (;;) {
+	while (nvme_cqe_valid(nvmeq, head, phase)) {
 		void *ctx;
 		nvme_completion_fn fn;
 		struct nvme_completion cqe = nvmeq->cqes[head];
-		if ((le16_to_cpu(cqe.status) & 1) != phase)
-			break;
 		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
 		if (++head == nvmeq->q_depth) {
 			head = 0;
@@ -996,10 +1001,9 @@ static irqreturn_t nvme_irq(int irq, void *data)
 static irqreturn_t nvme_irq_check(int irq, void *data)
 {
 	struct nvme_queue *nvmeq = data;
-	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
-	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
-		return IRQ_NONE;
-	return IRQ_WAKE_THREAD;
+	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+		return IRQ_WAKE_THREAD;
+	return IRQ_NONE;
 }
 
 /*
-- 
2.50.1
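
For illustration only, here is a minimal user-space C sketch of the phase-bit
pattern that nvme_cqe_valid() implements: check the phase (validity) bit of a
completion entry first, and only then consume the rest of the entry. The names
demo_cqe, demo_queue and demo_process_cq are invented for this sketch and are
not the kernel's nvme_completion/nvme_queue types; the sketch shows the
control flow only and omits the endianness handling, memory-ordering and
device-shared-memory details the real driver deals with.

/*
 * Standalone sketch of "check the phase bit before reading the entry".
 * All names here are illustrative, not the kernel driver's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_DEPTH 4

struct demo_cqe {
	uint32_t result;	/* command-specific payload */
	uint16_t sq_head;
	uint16_t status;	/* bit 0 is the phase (validity) bit */
};

struct demo_queue {
	struct demo_cqe cqes[QUEUE_DEPTH];
	uint16_t cq_head;
	uint16_t cq_phase;
};

/* Mirrors the shape of nvme_cqe_valid(): look at the phase bit first */
static inline bool demo_cqe_valid(const struct demo_queue *q, uint16_t head,
				  uint16_t phase)
{
	return (q->cqes[head].status & 1) == phase;
}

static void demo_process_cq(struct demo_queue *q)
{
	uint16_t head = q->cq_head;
	uint16_t phase = q->cq_phase;

	/*
	 * Copy the whole CQE only after the phase bit says it is valid.
	 * Copying first and checking later risks consuming a half-written
	 * entry if the producer updates it concurrently.
	 */
	while (demo_cqe_valid(q, head, phase)) {
		struct demo_cqe cqe = q->cqes[head];

		printf("completed: sq_head=%u result=%u\n",
		       (unsigned)cqe.sq_head, (unsigned)cqe.result);

		if (++head == QUEUE_DEPTH) {
			head = 0;
			phase = !phase;	/* phase flips on queue wrap */
		}
	}
	q->cq_head = head;
	q->cq_phase = phase;
}

int main(void)
{
	struct demo_queue q;

	memset(&q, 0, sizeof(q));
	q.cq_phase = 1;

	/* Pretend the device posted one valid entry (phase bit set). */
	q.cqes[0].result = 42;
	q.cqes[0].sq_head = 7;
	q.cqes[0].status = 1;

	demo_process_cq(&q);
	return 0;
}

Built with e.g. "gcc -Wall demo.c", the sketch prints the single entry whose
phase bit matches and stops at the first entry whose phase bit does not,
which is the same consume-until-invalid loop shape the patched
nvme_process_cq() uses.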