www.infradead.org Git - users/hch/block.git/commitdiff
nvme-pci: use a smarter encoding for small prp pool allocations
authorChristoph Hellwig <hch@lst.de>
Sat, 5 Oct 2024 17:21:33 +0000 (19:21 +0200)
committerChristoph Hellwig <hch@lst.de>
Sat, 5 Oct 2024 17:21:33 +0000 (19:21 +0200)
Make 0 mean no allocations and -1 mean "small pool", instead of vice versa.
With that, only the small pool needs special casing.

Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/pci.c

index 5d67be831c2e9d13987d35610515eef62b6c6e84..e9869221300da09207fd26bc2f846e095f16af62 100644 (file)
@@ -223,7 +223,7 @@ struct nvme_iod {
        struct nvme_request req;
        struct nvme_command cmd;
        bool aborted;
-       /* # of PRP/SGL descriptors: (0 for small pool) */
+       /* # of PRP/SGL descriptors: (-1 for small pool) */
        s8 nr_descriptors;
        unsigned int dma_len;   /* length of single DMA segment mapping */
        dma_addr_t first_dma;
@@ -520,6 +520,12 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
        dma_addr_t dma_addr = iod->first_dma;
        int i;
 
+       if (iod->nr_descriptors == -1) {
+               dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
+                             iod->first_dma);
+               return;
+       }
+
        for (i = 0; i < iod->nr_descriptors; i++) {
                __le64 *prp_list = iod->descriptors[i];
                dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
@@ -542,15 +548,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
        WARN_ON_ONCE(!iod->sgt.nents);
 
        dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-
-       if (iod->nr_descriptors == 0)
-               dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
-                             iod->first_dma);
-       else if (iod->nr_descriptors == 1)
-               dma_pool_free(dev->prp_page_pool, iod->descriptors[0],
-                             iod->first_dma);
-       else
-               nvme_free_prps(dev, req);
+       nvme_free_prps(dev, req);
        mempool_free(iod->sgt.sgl, dev->iod_mempool);
 }
 
@@ -605,7 +603,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
        nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
        if (nprps <= (256 / 8)) {
                pool = dev->prp_small_pool;
-               iod->nr_descriptors = 0;
+               iod->nr_descriptors = -1;
        } else {
                pool = dev->prp_page_pool;
                iod->nr_descriptors = 1;
@@ -613,7 +611,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 
        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
        if (!prp_list) {
-               iod->nr_descriptors = -1;
+               iod->nr_descriptors = 0;
                return BLK_STS_RESOURCE;
        }
        iod->descriptors[0] = prp_list;
@@ -695,7 +693,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
        if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
                pool = dev->prp_small_pool;
-               iod->nr_descriptors = 0;
+               iod->nr_descriptors = -1;
        } else {
                pool = dev->prp_page_pool;
                iod->nr_descriptors = 1;
@@ -703,7 +701,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
        sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
        if (!sg_list) {
-               iod->nr_descriptors = -1;
+               iod->nr_descriptors = 0;
                return BLK_STS_RESOURCE;
        }
 
@@ -833,7 +831,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
        blk_status_t ret;
 
        iod->aborted = false;
-       iod->nr_descriptors = -1;
+       iod->nr_descriptors = 0;
        iod->sgt.nents = 0;
 
        ret = nvme_setup_cmd(req->q->queuedata, req);