mlx4_ib: contig support for control objects
author    Yishai Hadas <yishaih@mellanox.com>
          Mon, 25 Jun 2012 12:31:12 +0000 (15:31 +0300)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
          Tue, 7 Jul 2015 21:38:10 +0000 (14:38 -0700)
Reviewer: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
(Ported from Mellanox OFED 2.4)

Signed-off-by: Mukesh Kacker <mukesh.kacker@oracle.com>
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 0176caa5792c4576276470c2c3f86f0fca16a7bd..e41c3d69c3bc8228a21149df2c919a4da349543f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -140,14 +140,18 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
 {
        int err;
        int cqe_size = dev->dev->caps.cqe_size;
+       int shift;
+       int n;
 
        *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);
 
-       err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
-                           ilog2((*umem)->page_size), &buf->mtt);
+       n = ib_umem_page_count(*umem);
+       shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
+       err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+
        if (err)
                goto err_buf;
 
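The cq.c change above swaps the fixed one-entry-per-page MTT layout for whatever block size the helper can prove safe. A minimal sketch of the before/after call pattern, using only the calls visible in this diff (error handling elided):

    /* before: one MTT entry per system page of the umem */
    n = ib_umem_page_count(*umem);          /* pages in the user buffer */
    shift = ilog2((*umem)->page_size);      /* normally PAGE_SHIFT */

    /* after: physically contiguous pages may be merged into larger
     * blocks; the helper can raise the returned shift and shrink *n,
     * so that n entries of 2^shift bytes still cover the buffer */
    n = ib_umem_page_count(*umem);
    shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
    err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);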
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index fce3934372a161680e4e4f2dd9716963e1178790..224ed9c9c1f69885f24c2d43591c40ae2452da7f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -652,6 +652,9 @@ void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem);
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+                                               u64 start_va,
+                                               int *num_of_mtts);
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 3519d4547b20ca5edd9ce757fcfbef0809baeafd..8e4889d447a4347133ee0a60102caca82ff4ff71 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -263,7 +263,7 @@ static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
     All chunks in the middle already handled as part of mtt shift calculation
     for both their start & end addresses.
 */
-static int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
                                                u64 start_va,
                                                int *num_of_mtts)
 {
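mr.c only changes visibility here: mlx4_ib_umem_calc_optimal_mtt_size loses its static qualifier so cq.c and qp.c can call it. Per the comment in the surrounding context, it derives the largest safe MTT page shift from the start and end addresses of the buffer's physically contiguous chunks. A self-contained sketch of that kind of alignment math, with a hypothetical calc_optimal_shift that is not the driver function:

    #include <stdint.h>
    #include <stdio.h>

    /* Find the largest shift such that every contiguous run starts and
     * ends on a 2^shift boundary, never dropping below the base page
     * shift. Each run can then be mapped as 2^shift-byte MTT blocks. */
    static int calc_optimal_shift(const uint64_t *start, const uint64_t *len,
                                  int nruns, int page_shift)
    {
            int shift = 31;        /* arbitrary upper bound for the sketch */

            for (int i = 0; i < nruns; i++) {
                    /* any set low bit in a run boundary caps the block size */
                    uint64_t bits = start[i] | (start[i] + len[i]);

                    while (shift > page_shift && (bits & ((1ULL << shift) - 1)))
                            shift--;
            }
            return shift;
    }

    int main(void)
    {
            /* two 64 KiB runs, each 64 KiB aligned: shift 16, not 12 */
            uint64_t start[] = { 0x100000, 0x200000 };
            uint64_t len[]   = { 0x10000, 0x10000 };

            printf("shift = %d\n", calc_optimal_shift(start, len, 2, 12));
            return 0;
    }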
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 697fc31bc42cfe75c80301ca0f6a990f0dd5fe22..156a11a048f0645852c8246ba7669368f59a4480 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -709,6 +709,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 
        if (pd->uobject) {
                struct mlx4_ib_create_qp ucmd;
+               int shift;
+               int n;
 
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
@@ -728,8 +730,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        goto err;
                }
 
-               err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
-                                   ilog2(qp->umem->page_size), &qp->mtt);
+               n = ib_umem_page_count(qp->umem);
+               shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+               err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
                if (err)
                        goto err_buf;
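qp.c now mirrors cq.c, with start_va passed as 0 for these control objects just as in the CQ path. A worked example of the payoff, assuming 4 KiB system pages: a 256 KiB QP buffer that lands on physically contiguous, 64 KiB-aligned memory can be described by n = 4 MTT entries with shift = 16 instead of n = 64 entries with shift = 12, shrinking the translation table the HCA has to walk.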