www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
nvme: return the whole CQE through the request passthrough interface
authorChristoph Hellwig <hch@lst.de>
Mon, 29 Feb 2016 14:59:47 +0000 (15:59 +0100)
committerChuck Anderson <chuck.anderson@oracle.com>
Thu, 1 Jun 2017 20:41:05 +0000 (13:41 -0700)
Both LightNVM and NVMe over Fabrics need to look at more than just the
status and result field.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Matias Bjørling <m@bjorling.me>
Reviewed-by: Jay Freyensee <james.p.freyensee@intel.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 1cb3cce5eb9de335330c8a147e47e3359a51a8b5)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c

index d3fefc826ae9df040180aa08841cbfb783c22091..265957975667b6435088c81647638e0e2fe90ea8 100644 (file)
@@ -129,7 +129,6 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
        req->cmd = (unsigned char *)cmd;
        req->cmd_len = sizeof(struct nvme_command);
-       req->special = (void *)0;
 
        return req;
 }
@@ -140,7 +139,8 @@ EXPORT_SYMBOL_GPL(nvme_alloc_request);
  * if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+               struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+               unsigned timeout)
 {
        struct request *req;
        int ret;
@@ -150,6 +150,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                return PTR_ERR(req);
 
        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+       req->special = cqe;
 
        if (buffer && bufflen) {
                ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -158,8 +159,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
        }
 
        blk_execute_rq(req->q, NULL, req, 0);
-       if (result)
-               *result = (u32)(uintptr_t)req->special;
        ret = req->errors;
  out:
        blk_mq_free_request(req);
@@ -169,7 +168,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
 {
-       return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+       return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -179,6 +178,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                u32 *result, unsigned timeout)
 {
        bool write = cmd->common.opcode & 1;
+       struct nvme_completion cqe;
        struct nvme_ns *ns = q->queuedata;
        struct gendisk *disk = ns ? ns->disk : NULL;
        struct request *req;
@@ -191,6 +191,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                return PTR_ERR(req);
 
        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+       req->special = &cqe;
 
        if (ubuffer && bufflen) {
                ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -245,7 +246,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
        blk_execute_rq(req->q, disk, req, 0);
        ret = req->errors;
        if (result)
-               *result = (u32)(uintptr_t)req->special;
+               *result = le32_to_cpu(cqe.result);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
@@ -316,6 +317,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                                        dma_addr_t dma_addr, u32 *result)
 {
        struct nvme_command c;
+       struct nvme_completion cqe;
+       int ret;
 
        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
@@ -323,13 +326,18 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);
 
-       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+       ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+       if (ret >= 0)
+               *result = le32_to_cpu(cqe.result);
+       return ret;
 }
 
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                                        dma_addr_t dma_addr, u32 *result)
 {
        struct nvme_command c;
+       struct nvme_completion cqe;
+       int ret;
 
        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_set_features;
@@ -337,7 +345,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
        c.features.fid = cpu_to_le32(fid);
        c.features.dword11 = cpu_to_le32(dword11);
 
-       return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+       ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+       if (ret >= 0)
+               *result = le32_to_cpu(cqe.result);
+       return ret;
 }
 
 int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
index a01eb264a1b77dee17346209a6fb6658367223d8..18e2b144856f94a6f4782e28ab627c290fb5f33a 100644 (file)
@@ -241,7 +241,8 @@ void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-               void *buffer, unsigned bufflen,  u32 *result, unsigned timeout);
+               struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+               unsigned timeout);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen, u32 *result,
                unsigned timeout);
index 887c3ab474e4359d0fb0aa1f279d572aecdd45b4..0523e90470c1b253ad6fab4558484603b5111338 100644 (file)
@@ -815,10 +815,8 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
                }
 
                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-               if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-                       u32 result = le32_to_cpu(cqe.result);
-                       req->special = (void *)(uintptr_t)result;
-               }
+               if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+                       memcpy(req->special, &cqe, sizeof(cqe));
                blk_mq_complete_request(req, status >> 1);
 
        }
@@ -949,9 +947,8 @@ static void abort_endio(struct request *req, int error)
        struct nvme_queue *nvmeq = iod->nvmeq;
        u16 status = req->errors;
 
-       dev_warn(nvmeq->dev->ctrl.device, "Abort status:%x result:%x", status);
+       dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-
        blk_mq_free_request(req);
 }