struct nvme_request req;
struct nvme_command cmd;
bool aborted;
- /* # of PRP/SGL descriptors: (0 for small pool) */
+ /* # of PRP/SGL descriptors: -1 for small pool, 0 if none allocated */
s8 nr_descriptors;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
dma_addr_t dma_addr = iod->first_dma;
int i;
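+ /* A single descriptor from the small pool is encoded as -1. */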
+ if (iod->nr_descriptors == -1) {
+ dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
+ iod->first_dma);
+ return;
+ }
+
for (i = 0; i < iod->nr_descriptors; i++) {
__le64 *prp_list = iod->descriptors[i];
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
WARN_ON_ONCE(!iod->sgt.nents);
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-
- if (iod->nr_descriptors == 0)
- dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
- iod->first_dma);
- else if (iod->nr_descriptors == 1)
- dma_pool_free(dev->prp_page_pool, iod->descriptors[0],
- iod->first_dma);
- else
- nvme_free_prps(dev, req);
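+ /* nvme_free_prps() handles both the small pool and page pool descriptors. */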
+ nvme_free_prps(dev, req);
mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
- iod->nr_descriptors = 0;
+ iod->nr_descriptors = -1;
} else {
pool = dev->prp_page_pool;
iod->nr_descriptors = 1;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->nr_descriptors = -1;
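+ /* Allocation failed, so there is no descriptor to free. */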
+ iod->nr_descriptors = 0;
return BLK_STS_RESOURCE;
}
iod->descriptors[0] = prp_list;
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
pool = dev->prp_small_pool;
- iod->nr_descriptors = 0;
+ iod->nr_descriptors = -1;
} else {
pool = dev->prp_page_pool;
iod->nr_descriptors = 1;
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list) {
- iod->nr_descriptors = -1;
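+ /* No SGL descriptor was allocated. */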
+ iod->nr_descriptors = 0;
return BLK_STS_RESOURCE;
}
blk_status_t ret;
iod->aborted = false;
- iod->nr_descriptors = -1;
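+ /* No PRP/SGL descriptors have been allocated yet. */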
+ iod->nr_descriptors = 0;
iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);