#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define NVME_SMALL_POOL_SIZE 256
+
/*
* These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */

In nvme_pci_setup_prps(), the hard-coded 256 becomes the new constant at the point where the driver decides whether a PRP list fits a small-pool element:

	}
if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
- 256 / sizeof(__le64))
+ NVME_SMALL_POOL_SIZE / sizeof(__le64))
iod->flags |= IOD_SMALL_POOL;
	prp_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC,
			&prp_dma);
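With 4 KiB controller pages and 8-byte __le64 PRP entries, the test above says: use the small pool whenever the data spans at most 256 / 8 = 32 controller pages, i.e. up to 32 * 4 KiB = 128 KiB. A standalone sketch of that arithmetic (userspace C for illustration only; the 4096-byte NVME_CTRL_PAGE_SIZE is an assumption, since the controller page size is configurable):

	#include <stdio.h>
	#include <stdint.h>

	/* Illustration only, not driver code. */
	#define NVME_SMALL_POOL_SIZE	256
	#define NVME_CTRL_PAGE_SIZE	4096	/* assumed, the common value */

	int main(void)
	{
		/* One __le64 per data page: 256 / 8 = 32 entries. */
		size_t prps = NVME_SMALL_POOL_SIZE / sizeof(uint64_t);

		/* 32 entries * 4 KiB = 128 KiB, the upper bound in the
		 * "Optimisation for I/Os between 4k and 128k" comment. */
		printf("PRP entries: %zu, max I/O: %zu KiB\n",
		       prps, prps * NVME_CTRL_PAGE_SIZE / 1024);
		return 0;
	}

The SGL path in nvme_pci_setup_sgls() applies the same test with its own element size; a struct nvme_sgl_desc is 16 bytes, so a small-pool element holds 256 / 16 = 16 descriptors: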
return BLK_STS_OK;
}
- if (entries <= 256 / sizeof(*sg_list))
+ if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list))
iod->flags |= IOD_SMALL_POOL;
sg_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC, &sgl_dma);
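Both call sites pass the result of nvme_dma_pool() to dma_pool_alloc(). That helper is not part of this excerpt; a minimal sketch of what it presumably looks like, given the IOD_SMALL_POOL flag and the two pool pointers used elsewhere in the patch:

	/* Sketch under the above assumptions, not the verbatim helper. */
	static inline struct dma_pool *nvme_dma_pool(struct nvme_dev *dev,
			struct nvme_iod *iod)
	{
		/* The flag set by the size checks above selects the pool. */
		if (iod->flags & IOD_SMALL_POOL)
			return dev->prp_small_pool;
		return dev->prp_page_pool;
	}

The two pools themselves are created in nvme_setup_prp_pools(), where the remaining literal 256s live: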
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
- size_t small_align = 256;
+ size_t small_align = NVME_SMALL_POOL_SIZE;
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
					NVME_CTRL_PAGE_SIZE,
					NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
+	/*
+	 * The quirk below raises the small pool's alignment to 512 bytes;
+	 * that only keeps each element inside its own aligned slot if an
+	 * element is no larger than the slot.
+	 */
+	static_assert(NVME_SMALL_POOL_SIZE <= 512);
+
if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
small_align = 512;
/* Optimisation for I/Os between 4k and 128k */
- dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, small_align, 0);
+ dev->prp_small_pool = dma_pool_create("prp list small", dev->dev,
+ NVME_SMALL_POOL_SIZE, small_align, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
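For context, dma_pool_create(name, dev, size, align, boundary) creates a pool of fixed-size DMA-coherent elements; NVME_QUIRK_DMAPOOL_ALIGN_512 widens the small pool's alignment so every 256-byte element starts on its own 512-byte slot, for controllers that are picky about where a PRP list may sit. A hedged usage sketch of the small pool from an I/O path (variable names are illustrative):

	/* Illustration of the dma_pool API as used above, not driver code. */
	dma_addr_t prp_dma;
	__le64 *prp_list;

	prp_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list)
		return BLK_STS_RESOURCE;

	/* ... fill in up to 32 page addresses, hand prp_dma to the device ... */

	dma_pool_free(dev->prp_small_pool, prp_list, prp_dma);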