*/
#define NVME_MAX_KB_SZ 8192
#define NVME_MAX_SEGS 128
-#define NVME_MAX_NR_ALLOCATIONS 5
+#define NVME_MAX_NR_DESCRIPTORS 5
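+/*
+ * NVME_MAX_NR_DESCRIPTORS bounds the descriptor pages needed per request;
+ * it is checked against nvme_pci_npages_prp() by a BUILD_BUG_ON() at
+ * module init.
+ */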
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);
struct completion delete_done;
};
-union nvme_descriptor {
- struct nvme_sgl_desc *sg_list;
- __le64 *prp_list;
-};
-
/*
* The nvme_iod describes the data in an I/O.
- *
- * The sg pointer contains the list of PRP/SGL chunk allocations in addition
- * to the actual struct scatterlist.
*/
struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
bool aborted;
- s8 nr_allocations; /* PRP list pool allocations. 0 means small
- pool in use */
+	/* # of PRP/SGL descriptors (0 for small pool) */
+ s8 nr_descriptors;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
dma_addr_t meta_dma;
struct sg_table sgt;
- union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
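+	/*
+	 * Each entry points at one descriptor page: either a PRP list
+	 * (__le64[]) or an SGL segment (struct nvme_sgl_desc[]), cast at
+	 * the use site now that the nvme_descriptor union is gone.
+	 */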
+ void *descriptors[NVME_MAX_NR_DESCRIPTORS];
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
dma_addr_t dma_addr = iod->first_dma;
int i;
- for (i = 0; i < iod->nr_allocations; i++) {
- __le64 *prp_list = iod->list[i].prp_list;
+ for (i = 0; i < iod->nr_descriptors; i++) {
+ __le64 *prp_list = iod->descriptors[i];
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
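+	/*
+	 * nr_descriptors == 0: single descriptor from the small pool;
+	 * == 1: single page-pool descriptor; > 1: chained PRP lists,
+	 * freed page by page in nvme_free_prps().
+	 */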
- if (iod->nr_allocations == 0)
- dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
+ if (iod->nr_descriptors == 0)
+ dma_pool_free(dev->prp_small_pool, iod->descriptors[0],
iod->first_dma);
- else if (iod->nr_allocations == 1)
- dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
+ else if (iod->nr_descriptors == 1)
+ dma_pool_free(dev->prp_page_pool, iod->descriptors[0],
iod->first_dma);
else
nvme_free_prps(dev, req);
nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
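+	/* Up to 32 PRP entries (8 bytes each) fit a 256-byte small pool buffer. */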
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
- iod->nr_allocations = 0;
+ iod->nr_descriptors = 0;
} else {
pool = dev->prp_page_pool;
- iod->nr_allocations = 1;
+ iod->nr_descriptors = 1;
}
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->nr_allocations = -1;
+ iod->nr_descriptors = -1;
return BLK_STS_RESOURCE;
}
- iod->list[0].prp_list = prp_list;
+ iod->descriptors[0] = prp_list;
iod->first_dma = prp_dma;
i = 0;
for (;;) {
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
goto free_prps;
- iod->list[iod->nr_allocations++].prp_list = prp_list;
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
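+			/*
+			 * The previous page is full: move its last entry to
+			 * the new page and replace it with a chain pointer
+			 * to this page's DMA address.
+			 */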
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
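+	/* 16 SGL descriptors (16 bytes each) fill a 256-byte small pool buffer. */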
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
pool = dev->prp_small_pool;
- iod->nr_allocations = 0;
+ iod->nr_descriptors = 0;
} else {
pool = dev->prp_page_pool;
- iod->nr_allocations = 1;
+ iod->nr_descriptors = 1;
}
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list) {
- iod->nr_allocations = -1;
+ iod->nr_descriptors = -1;
return BLK_STS_RESOURCE;
}
- iod->list[0].sg_list = sg_list;
+ iod->descriptors[0] = sg_list;
iod->first_dma = sgl_dma;
nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
blk_status_t ret;
iod->aborted = false;
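+	/* -1: no descriptor pages allocated yet (also set on alloc failure) */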
- iod->nr_allocations = -1;
+ iod->nr_descriptors = -1;
iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
- BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);
+ BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS);
return pci_register_driver(&nvme_driver);
}