spin_unlock(&nvmeq->sq_lock);
}
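+/*
+ * How a request's data transfer may be described on this queue (a sketch of
+ * a documentation comment for the new enum, matching the logic below):
+ * SGL_UNSUPPORTED: admin queue or controller without SGL support, use PRPs
+ * SGL_SUPPORTED:   SGLs allowed; the choice is left to the sgl_threshold policy
+ * SGL_FORCED:      SGLs required (user command or multi-segment integrity payload)
+ */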
+enum nvme_use_sgl {
+ SGL_UNSUPPORTED,
+ SGL_SUPPORTED,
+ SGL_FORCED,
+};
+
static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
struct request *req)
{
	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
		return false;
	return req->nr_integrity_segments > 1 ||
		nvme_ns_has_pi(req->q->queuedata);
}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
- int nseg)
+static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
+ struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- unsigned int avg_seg_size;
- avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
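+	/* SGLs are only usable on I/O queues of SGL-capable controllers. */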
+ if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
+ if (nvme_req(req)->flags & NVME_REQ_USERCMD)
+ return SGL_FORCED;
+ if (req->nr_integrity_segments > 1)
+ return SGL_FORCED;
+ return SGL_SUPPORTED;
+ }
- if (!nvme_ctrl_sgl_supported(&dev->ctrl))
- return false;
- if (!nvmeq->qid)
- return false;
- if (nvme_pci_metadata_use_sgls(dev, req))
- return true;
- if (!sgl_threshold || avg_seg_size < sgl_threshold)
- return nvme_req(req)->flags & NVME_REQ_USERCMD;
- return true;
+
+	return SGL_UNSUPPORTED;
+}
+
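+/* Average DMA segment length of a request, compared against sgl_threshold. */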
+static unsigned int nvme_pci_avg_seg_size(struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ return DIV_ROUND_UP(blk_rq_payload_bytes(req), iod->sgt.nents);
}
static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
blk_status_t ret = BLK_STS_RESOURCE;
int rc;
goto out_free_sg;
}
- if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
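+	/*
+	 * Use SGLs when the request requires them, or when they are supported
+	 * and the average segment size meets the sgl_threshold opt-in.
+	 */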
+ if (use_sgl == SGL_FORCED ||
+ (use_sgl == SGL_SUPPORTED &&
+	     (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw);