RDMA/umem: Preallocate and cache IOVA for UMEM ODP
author Leon Romanovsky <leonro@nvidia.com>
Mon, 27 Nov 2023 12:23:53 +0000 (14:23 +0200)
committer Leon Romanovsky <leon@kernel.org>
Thu, 3 Oct 2024 16:05:52 +0000 (19:05 +0300)
As preparation for providing a two-step interface to map pages,
preallocate the IOVA when the UMEM is initialized.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
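
For orientation before the diff: the two-step interface mentioned above splits
DMA mapping into a one-time IOVA reservation, done here when the umem is
initialized, and per-fault linking of pages into that reserved range. The
sketch below is illustrative only; dma_init_iova_state(), dma_alloc_iova() and
dma_free_iova() appear in this patch, while dma_link_range() and both helper
names are assumptions about later patches in the series, not code from this
commit.

#include <rdma/ib_umem_odp.h>

/* Step 1 (this patch): reserve IOVA for the whole umem once, at init. */
static int odp_reserve_iova_sketch(struct ib_umem_odp *umem_odp,
				   unsigned long start, unsigned long end)
{
	struct ib_device *dev = umem_odp->umem.ibdev;

	dma_init_iova_state(&umem_odp->state, dev->dma_device,
			    DMA_BIDIRECTIONAL);
	return dma_alloc_iova(&umem_odp->state, end - start);
}

/*
 * Step 2 (assumed shape of later patches): on a page fault, link the
 * faulted page into the cached range at its fixed offset, so the fault
 * path never allocates IOVA space. dma_link_range() is an assumed name.
 */
static int odp_link_page_sketch(struct ib_umem_odp *umem_odp,
				struct page *page, unsigned long offset)
{
	return dma_link_range(&umem_odp->state, page_to_phys(page),
			      offset, PAGE_SIZE);
}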
drivers/infiniband/core/umem_odp.c
include/rdma/ib_umem_odp.h

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e9fa22d31c233204ccd8b7decc7353b3629bad3c..01cbf7f55b3ae4158cd0266178d84e8713f7701b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -50,6 +50,7 @@
 static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
                                   const struct mmu_interval_notifier_ops *ops)
 {
+       struct ib_device *dev = umem_odp->umem.ibdev;
        int ret;
 
        umem_odp->umem.is_odp = 1;
@@ -87,15 +88,23 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
                        goto out_pfn_list;
                }
 
+               dma_init_iova_state(&umem_odp->state, dev->dma_device,
+                                   DMA_BIDIRECTIONAL);
+               ret = dma_alloc_iova(&umem_odp->state, end - start);
+               if (ret)
+                       goto out_dma_list;
+
                ret = mmu_interval_notifier_insert(&umem_odp->notifier,
                                                   umem_odp->umem.owning_mm,
                                                   start, end - start, ops);
                if (ret)
-                       goto out_dma_list;
+                       goto out_free_iova;
        }
 
        return 0;
 
+out_free_iova:
+       dma_free_iova(&umem_odp->state);
 out_dma_list:
        kvfree(umem_odp->dma_list);
 out_pfn_list:
@@ -274,6 +284,7 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
                                            ib_umem_end(umem_odp));
                mutex_unlock(&umem_odp->umem_mutex);
                mmu_interval_notifier_remove(&umem_odp->notifier);
+               dma_free_iova(&umem_odp->state);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->pfn_list);
        }
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 0844c1d05ac619b5d3c0c58263088d0f09fbdcc1..c0c1215925eb74b652b59620d3cf871ba2e4518a 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -23,6 +23,7 @@ struct ib_umem_odp {
         * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
         */
        dma_addr_t              *dma_list;
+       struct dma_iova_state state;
        /*
         * The umem_mutex protects the page_list and dma_list fields of an ODP
         * umem, allowing only a single thread to map/unmap pages. The mutex
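
Caching the state in struct ib_umem_odp gives the reservation the same
lifetime as the umem itself: it is allocated in ib_init_umem_odp() and
released in ib_umem_odp_release() only after the mmu interval notifier is
removed. A condensed sketch of the teardown half of that pairing (helper
name illustrative; the call is the one in the diff above):

/*
 * Illustrative teardown half of the pairing; the real code is the
 * ib_umem_odp_release() hunk above.
 */
static void odp_release_iova_sketch(struct ib_umem_odp *umem_odp)
{
	/* Notifier already removed: no fault handler can still use the range. */
	dma_free_iova(&umem_odp->state);
}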