*/
 struct nvme_iod {
        struct nvme_request req;
+       struct nvme_command cmd;
        struct nvme_queue *nvmeq;
        bool use_sgl;
        int aborted;
@@ ... @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       struct nvme_command cmnd;
+       struct nvme_command *cmnd = &iod->cmd;
        blk_status_t ret;
 
        iod->aborted = 0;
@@ ... @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
                return BLK_STS_IOERR;
 
-       ret = nvme_setup_cmd(ns, req, &cmnd);
+       ret = nvme_setup_cmd(ns, req, cmnd);
        if (ret)
                return ret;
 
        if (blk_rq_nr_phys_segments(req)) {
-               ret = nvme_map_data(dev, req, &cmnd);
+               ret = nvme_map_data(dev, req, cmnd);
                if (ret)
                        goto out_free_cmd;
        }
 
        if (blk_integrity_rq(req)) {
-               ret = nvme_map_metadata(dev, req, &cmnd);
+               ret = nvme_map_metadata(dev, req, cmnd);
                if (ret)
                        goto out_unmap_data;
        }
 
        blk_mq_start_request(req);
-       nvme_submit_cmd(nvmeq, &cmnd, bd->last);
+       nvme_submit_cmd(nvmeq, cmnd, bd->last);
        return BLK_STS_OK;
 out_unmap_data:
        nvme_unmap_data(dev, req);
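
The diff above moves the struct nvme_command from the .queue_rq() stack
into the per-request PDU (struct nvme_iod), so the command is built in
storage that is allocated with the request and is still reachable after
the submission call returns. A minimal userspace sketch of that pattern
follows; cmd_t, pdu_t, ring_doorbell() and queue_rq() are simplified
stand-ins for illustration, not the driver's real types or API.

#include <stdio.h>
#include <string.h>

typedef struct {
	unsigned char opcode;
	unsigned int nsid;
} cmd_t;

/* Per-request driver data ("PDU"): allocated once per request, so
 * anything embedded here lives as long as the request itself. */
typedef struct {
	cmd_t cmd;	/* replaces the on-stack command */
	int aborted;
} pdu_t;

static void ring_doorbell(const cmd_t *cmd)
{
	printf("submit opcode 0x%02x\n", cmd->opcode);
}

/* After the change: build the command in place inside the PDU and pass
 * that same pointer down the submission path, as the diff does with
 * cmnd = &iod->cmd. */
static void queue_rq(pdu_t *pdu)
{
	cmd_t *cmnd = &pdu->cmd;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->opcode = 0x02;	/* e.g. a read */
	ring_doorbell(cmnd);
}

int main(void)
{
	pdu_t pdu = { .aborted = 0 };

	queue_rq(&pdu);
	return 0;
}

Because the command now lives in request-owned storage, later code that
can reach the request's PDU (in the kernel, via blk_mq_rq_to_pdu()) can
still inspect the command the request was submitted with, which a stack
variable that dies when .queue_rq() returns could not provide.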