nvme-pci: refactor nvme_pci_use_sgls
author     Christoph Hellwig <hch@lst.de>
           Tue, 17 Jun 2025 05:18:56 +0000 (07:18 +0200)
committer  Christoph Hellwig <hch@lst.de>
           Tue, 17 Jun 2025 05:26:52 +0000 (07:26 +0200)
Move the average segment size calculation into a separate helper, and
return a tristate to distinguish the case where we can use SGLs from the
case where we have to use them.  This will allow us to simplify the code
and make more efficient decisions in follow-on changes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
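
To make the new decision flow concrete, here is a minimal userspace
sketch (not part of the commit: the kernel context is stubbed out, and
should_use_sgl() with its sample numbers is hypothetical; only the enum
values and the threshold logic mirror the patch):

#include <stdbool.h>
#include <stdio.h>

/* Same rounding the kernel's DIV_ROUND_UP() performs. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

enum nvme_use_sgl {
	SGL_UNSUPPORTED,	/* admin queue, or controller lacks SGL support */
	SGL_SUPPORTED,		/* SGLs possible; decide using sgl_threshold */
	SGL_FORCED,		/* SGLs required, e.g. user passthrough commands */
};

/*
 * Hypothetical stand-in for the check nvme_map_data() now performs:
 * forced requests always take the SGL path, supported ones only when
 * the sgl_threshold module parameter is non-zero (0 disables SGLs)
 * and the average segment size reaches it.
 */
static bool should_use_sgl(enum nvme_use_sgl use_sgl,
			   unsigned int payload_bytes, unsigned int nents,
			   unsigned int sgl_threshold)
{
	unsigned int avg_seg_size = DIV_ROUND_UP(payload_bytes, nents);

	return use_sgl == SGL_FORCED ||
		(use_sgl == SGL_SUPPORTED &&
		 sgl_threshold && avg_seg_size >= sgl_threshold);
}

int main(void)
{
	/* 128k over 4 segments: 32k average >= 8k threshold -> SGL (1) */
	printf("%d\n", should_use_sgl(SGL_SUPPORTED, 131072, 4, 8192));
	/* sgl_threshold == 0 disables SGLs unless forced -> PRP (0) */
	printf("%d\n", should_use_sgl(SGL_SUPPORTED, 131072, 4, 0));
	/* forced requests ignore the threshold entirely -> SGL (1) */
	printf("%d\n", should_use_sgl(SGL_FORCED, 4096, 1, 0));
	return 0;
}
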
drivers/nvme/host/pci.c

index 8ff12e415cb5d1529d760b33f3e0cf3b8d1555f1..bc84438f0523eff9450af2dfb9b3f0c9c331688e 100644 (file)
@@ -578,6 +578,12 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
        spin_unlock(&nvmeq->sq_lock);
 }
 
+enum nvme_use_sgl {
+       SGL_UNSUPPORTED,
+       SGL_SUPPORTED,
+       SGL_FORCED,
+};
+
 static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
                                              struct request *req)
 {
@@ -587,23 +593,27 @@ static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
                nvme_req(req)->flags & NVME_REQ_USERCMD;
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
-                                    int nseg)
+static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
+               struct request *req)
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-       unsigned int avg_seg_size;
 
-       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+       if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
+               if (nvme_req(req)->flags & NVME_REQ_USERCMD)
+                       return SGL_FORCED;
+               if (req->nr_integrity_segments > 1)
+                       return SGL_FORCED;
+               return SGL_SUPPORTED;
+       }
 
-       if (!nvme_ctrl_sgl_supported(&dev->ctrl))
-               return false;
-       if (!nvmeq->qid)
-               return false;
-       if (nvme_pci_metadata_use_sgls(dev, req))
-               return true;
-       if (!sgl_threshold || avg_seg_size < sgl_threshold)
-               return nvme_req(req)->flags & NVME_REQ_USERCMD;
-       return true;
+       return SGL_UNSUPPORTED;
+}
+
+static unsigned int nvme_pci_avg_seg_size(struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+       return DIV_ROUND_UP(blk_rq_payload_bytes(req), iod->sgt.nents);
 }
 
 static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
@@ -851,6 +861,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
        blk_status_t ret = BLK_STS_RESOURCE;
        int rc;
 
@@ -888,7 +899,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                goto out_free_sg;
        }
 
-       if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
+       if (use_sgl == SGL_FORCED ||
+           (use_sgl == SGL_SUPPORTED &&
+            (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
                ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw);
        else
                ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw);