www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sif: Avoid using SIFMT_2M for allocation of any tables in no_huge_page mode
authorKnut Omang <knut.omang@oracle.com>
Fri, 16 Sep 2016 09:32:32 +0000 (11:32 +0200)
committerKnut Omang <knut.omang@oracle.com>
Mon, 3 Oct 2016 12:02:21 +0000 (14:02 +0200)
The feature mask no_huge_pages, enabled for Xen due to
DMA address alignment issues with huge pages, did not apply
to allocation of CQs, RQs, and SQs, only to the tables.
This causes allocation of queues of these types
larger than 4M in total size to fail on Xen PV domains
such as dom0.

Orabug: 24683830

Signed-off-by: Knut Omang <knut.omang@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
drivers/infiniband/hw/sif/sif_cq.c
drivers/infiniband/hw/sif/sif_rq.c
drivers/infiniband/hw/sif/sif_sq.c

index d6d6ceca2de86170e28c8284e37fb8d6c00039cc..d49667330abe70272fe8f547bbeb64e441073900 100644 (file)
@@ -208,9 +208,12 @@ struct sif_cq *create_cq(struct sif_pd *pd, int entries,
 
        if (alloc_sz <= SIF_MAX_CONT)
                cq->mem = sif_mem_create_dmacont(sdev, alloc_sz, GFP_KERNEL | __GFP_ZERO, DMA_BIDIRECTIONAL);
-       else
+       else {
+               enum sif_mem_type memtype = sif_feature(no_huge_pages) ? SIFMT_4K : SIFMT_2M;
+
                cq->mem = sif_mem_create(sdev, alloc_sz >> PMD_SHIFT,
-                                       alloc_sz, SIFMT_2M, GFP_KERNEL | __GFP_ZERO, DMA_BIDIRECTIONAL);
+                                       alloc_sz, memtype, GFP_KERNEL | __GFP_ZERO, DMA_BIDIRECTIONAL);
+       }
        if (!cq->mem) {
                sif_log(sdev, SIF_INFO, "Failed to allocate %d CQ entries", entries);
                ecq = ERR_PTR(-ENOMEM);
index 8a7d3f1eadc6274af22b7df84998ee675723f0fb..f8c9685b7ba1b2ca6e6d5faa8a89a56abb2474ee 100644 (file)
@@ -143,9 +143,12 @@ int alloc_rq(struct sif_dev *sdev, struct sif_pd *pd,
 
        if (alloc_sz <= SIF_MAX_CONT)
                rq->mem = sif_mem_create_dmacont(sdev, alloc_sz, GFP_KERNEL | __GFP_ZERO, DMA_BIDIRECTIONAL);
-       else
+       else {
+               enum sif_mem_type memtype = sif_feature(no_huge_pages) ? SIFMT_4K : SIFMT_2M;
+
                rq->mem = sif_mem_create(sdev, alloc_sz >> PMD_SHIFT,
-                                       alloc_sz, SIFMT_2M, GFP_KERNEL | __GFP_ZERO, DMA_BIDIRECTIONAL);
+                                       alloc_sz, memtype, GFP_KERNEL | __GFP_ZERO, DMA_BIDIRECTIONAL);
+       }
        if (!rq->mem) {
                sif_log(sdev, SIF_INFO, "Failed RQ buffer pool allocation!");
                ret = -ENOMEM;
index 8ce97a2a6ebd85799ca8f2d20a43f48d236b256f..9c13976eecdfefa578892f32409daeb74e244a47 100644 (file)
@@ -167,9 +167,11 @@ int sif_alloc_sq(struct sif_dev *sdev, struct sif_pd *pd,
        if (alloc_sz <= SIF_MAX_CONT)
                sq->mem = sif_mem_create_dmacont(sdev, alloc_sz, GFP_KERNEL, DMA_BIDIRECTIONAL);
        else {
+               enum sif_mem_type memtype = sif_feature(no_huge_pages) ? SIFMT_4K : SIFMT_2M;
+
                alloc_sz = (alloc_sz + ~PMD_MASK) & PMD_MASK;
                sq->mem = sif_mem_create(sdev, alloc_sz >> PMD_SHIFT,
-                                       alloc_sz, SIFMT_2M, GFP_KERNEL | __GFP_ZERO,
+                                       alloc_sz, memtype, GFP_KERNEL | __GFP_ZERO,
                                        DMA_BIDIRECTIONAL);
        }
        if (!sq->mem) {