SGL_FORCED,
};
-static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
- struct request *req)
+static inline bool nvme_pci_metadata_use_sgls(struct request *req)
{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+
if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
return false;
 return req->nr_integrity_segments > 1 ||
	nvme_req(req)->flags & NVME_REQ_USERCMD;
}
-static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req)
+static void nvme_free_descriptors(struct request *req)
{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_addr_t dma_addr = iod->first_dma;
}
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- struct request *req)
+static void nvme_unmap_data(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
if (iod->dma_len) {
- dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ dma_unmap_page(nvmeq->dev->dev, iod->first_dma, iod->dma_len,
rq_dma_dir(req));
return;
}
WARN_ON_ONCE(!iod->sgt.nents);
- dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- nvme_free_descriptors(nvmeq, req);
- mempool_free(iod->sgt.sgl, dev->iod_mempool);
+ dma_unmap_sgtable(nvmeq->dev->dev, &iod->sgt, rq_dma_dir(req), 0);
+ nvme_free_descriptors(req);
+ mempool_free(iod->sgt.sgl, nvmeq->dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
}
}
-static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq,
- struct request *req, struct nvme_rw_command *cmnd)
+static blk_status_t nvme_pci_setup_prps(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
dma_len = sg_dma_len(sg);
}
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
- cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+ iod->cmd.common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
+ iod->cmd.common.dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
- nvme_free_descriptors(nvmeq, req);
+ nvme_free_descriptors(req);
return BLK_STS_RESOURCE;
bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}
-static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq,
- struct request *req, struct nvme_rw_command *cmd)
+static blk_status_t nvme_pci_setup_sgls(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sg = iod->sgt.sgl;
unsigned int entries = iod->sgt.nents;
int i = 0;
/* setting the transfer type as SGL */
- cmd->flags = NVME_CMD_SGL_METABUF;
+ iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
if (entries == 1) {
- nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
+ nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, sg);
return BLK_STS_OK;
}
iod->descriptors[iod->nr_descriptors++] = sg_list;
iod->first_dma = sgl_dma;
- nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
+ nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, entries);
do {
nvme_pci_sgl_set_data(&sg_list[i++], sg);
sg = sg_next(sg);
return BLK_STS_OK;
}
-static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
- struct nvme_command *cmnd)
+static blk_status_t nvme_map_data(struct request *req)
{
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
blk_status_t ret = BLK_STS_RESOURCE;
int rc;
if (use_sgl == SGL_FORCED ||
(use_sgl == SGL_SUPPORTED &&
(!sgl_threshold || nvme_pci_avg_seg_size(req) >= sgl_threshold)))
- ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw);
+ ret = nvme_pci_setup_sgls(req);
else
- ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw);
+ ret = nvme_pci_setup_prps(req);
if (ret != BLK_STS_OK)
goto out_unmap_sg;
return BLK_STS_OK;
return ret;
}
-static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
- struct request *req)
+static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_rw_command *cmnd = &iod->cmd.rw;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sgl, *sg;
unsigned int entries;
iod->meta_descriptor = sg_list;
iod->meta_dma = sgl_dma;
- cmnd->flags = NVME_CMD_SGL_METASEG;
- cmnd->metadata = cpu_to_le64(sgl_dma);
+ iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
+ iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
sgl = iod->meta_sgt.sgl;
if (entries == 1) {
return BLK_STS_RESOURCE;
}
-static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev,
- struct request *req)
+static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = rq_integrity_vec(req);
- struct nvme_command *cmnd = &iod->cmd;
- iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->meta_dma))
+ iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
- cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
return BLK_STS_OK;
}
-static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_map_metadata(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
- nvme_pci_metadata_use_sgls(dev, req))
- return nvme_pci_setup_meta_sgls(dev, req);
- return nvme_pci_setup_meta_mptr(dev, req);
+ nvme_pci_metadata_use_sgls(req))
+ return nvme_pci_setup_meta_sgls(req);
+ return nvme_pci_setup_meta_mptr(req);
}
-static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_prep_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
return ret;
if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, &iod->cmd);
+ ret = nvme_map_data(req);
if (ret)
goto out_free_cmd;
}
if (blk_integrity_rq(req)) {
- ret = nvme_map_metadata(dev, req);
+ ret = nvme_map_metadata(req);
if (ret)
goto out_unmap_data;
}
return BLK_STS_OK;
out_unmap_data:
if (blk_rq_nr_phys_segments(req))
- nvme_unmap_data(dev, req->mq_hctx->driver_data, req);
+ nvme_unmap_data(req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
return nvme_fail_nonready_command(&dev->ctrl, req);
- ret = nvme_prep_rq(dev, req);
+ ret = nvme_prep_rq(req);
if (unlikely(ret))
return ret;
spin_lock(&nvmeq->sq_lock);
if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
return false;
- return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+ return nvme_prep_rq(req) == BLK_STS_OK;
}
static void nvme_queue_rqs(struct rq_list *rqlist)
*rqlist = requeue_list;
}
-static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
- struct nvme_queue *nvmeq,
- struct request *req)
+static __always_inline void nvme_unmap_metadata(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
if (!iod->meta_sgt.nents) {
dma_unmap_page(dev->dev, iod->meta_dma,
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
-
if (blk_integrity_rq(req))
- nvme_unmap_metadata(dev, nvmeq, req);
-
+ nvme_unmap_metadata(req);
if (blk_rq_nr_phys_segments(req))
- nvme_unmap_data(dev, nvmeq, req);
+ nvme_unmap_data(req);
}
static void nvme_pci_complete_rq(struct request *req)