RDMA/core: Separate DMA mapping to caching IOVA and page linkage
author     Leon Romanovsky <leonro@nvidia.com>
Tue, 31 Oct 2023 10:57:56 +0000 (12:57 +0200)
committer  Leon Romanovsky <leon@kernel.org>
Thu, 3 Oct 2024 16:05:52 +0000 (19:05 +0300)
Reuse the newly added DMA API to cache the IOVA and only link/unlink pages
in the fast path.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/mlx5/odp.c
include/rdma/ib_umem_odp.h
kernel/dma/mapping.c
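
For orientation, here is a minimal sketch of the split this patch introduces: the slow
path reserves and caches an IOVA range once per ODP umem, while the fast path only
links/unlinks individual pages at their fixed offsets. It reuses only the calls visible
in the diff below (dma_init_iova_state(), dma_alloc_iova(), dma_hmm_link_page(),
dma_hmm_unlink_page()); the wrapper functions and their exact signatures are hypothetical
and simplified, not part of this patch.

/*
 * Hypothetical usage sketch (not part of this patch): the IOVA range is
 * reserved once when the ODP umem is initialized; faulted pages are then
 * only linked/unlinked at their fixed offsets in the fast path.
 */
static int odp_iova_setup_sketch(struct ib_umem_odp *umem_odp,
				 struct device *dma_dev, u64 start, u64 end)
{
	/* Slow path: cache the IOVA state and reserve the whole range once. */
	dma_init_iova_state(&umem_odp->state, dma_dev, DMA_BIDIRECTIONAL);
	return dma_alloc_iova(&umem_odp->state, end - start);
}

static dma_addr_t odp_link_page_sketch(struct ib_umem_odp *umem_odp, size_t idx)
{
	/* Fast path: link one faulted page at its offset in the cached IOVA. */
	return dma_hmm_link_page(&umem_odp->state, &umem_odp->pfn_list[idx],
				 idx * (1UL << umem_odp->page_shift));
}

static void odp_unlink_page_sketch(struct ib_umem_odp *umem_odp, size_t idx)
{
	/* Fast path: unlink on invalidation; the cached IOVA itself is kept. */
	dma_hmm_unlink_page(&umem_odp->state, &umem_odp->pfn_list[idx],
			    idx * (1UL << umem_odp->page_shift));
}

The per-page offset (idx << page_shift) replaces the old dma_list[] lookup, as seen in
populate_mtt() and ib_umem_odp_unmap_dma_pages() in the diff below.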

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 72885eca4181e0a12411f5d5625e26d81128501a..7bfa1e54454c1296a5f0c134d86c6e255ba76cc7 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -81,19 +81,12 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
                if (!umem_odp->pfn_list)
                        return -ENOMEM;
 
-               umem_odp->dma_list = kvcalloc(
-                       ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
-               if (!umem_odp->dma_list) {
-                       ret = -ENOMEM;
-                       goto out_pfn_list;
-               }
 
                dma_init_iova_state(&umem_odp->state, dev->dma_device,
                                    DMA_BIDIRECTIONAL);
                ret = dma_alloc_iova(&umem_odp->state, end - start);
                if (ret)
-                       goto out_dma_list;
-
+                       goto out_pfn_list;
 
                ret = mmu_interval_notifier_insert(&umem_odp->notifier,
                                                   umem_odp->umem.owning_mm,
@@ -106,8 +99,6 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 
 out_free_iova:
        dma_free_iova(&umem_odp->state);
-out_dma_list:
-       kvfree(umem_odp->dma_list);
 out_pfn_list:
        kvfree(umem_odp->pfn_list);
        return ret;
@@ -285,7 +276,6 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
                mutex_unlock(&umem_odp->umem_mutex);
                mmu_interval_notifier_remove(&umem_odp->notifier);
                dma_free_iova(&umem_odp->state);
-               kvfree(umem_odp->dma_list);
                kvfree(umem_odp->pfn_list);
        }
        put_pid(umem_odp->tgid);
@@ -293,40 +283,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 }
 EXPORT_SYMBOL(ib_umem_odp_release);
 
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @dma_index: index in the umem to add the dma to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- *
- * The function returns -EFAULT if the DMA mapping operation fails.
- *
- */
-static int ib_umem_odp_map_dma_single_page(
-               struct ib_umem_odp *umem_odp,
-               unsigned int dma_index,
-               struct page *page)
-{
-       struct ib_device *dev = umem_odp->umem.ibdev;
-       dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
-
-       *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
-                                   DMA_BIDIRECTIONAL);
-       if (ib_dma_mapping_error(dev, *dma_addr)) {
-               *dma_addr = 0;
-               return -EFAULT;
-       }
-       umem_odp->npages++;
-       return 0;
-}
-
 /**
  * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
  *
  * Maps the range passed in the argument to DMA addresses.
- * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
  * Upon success the ODP MR will be locked to let caller complete its device
  * page table update.
  *
@@ -434,15 +394,6 @@ retry:
                                  __func__, hmm_order, page_shift);
                        break;
                }
-
-               ret = ib_umem_odp_map_dma_single_page(
-                               umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]));
-               if (ret < 0) {
-                       ibdev_dbg(umem_odp->umem.ibdev,
-                                 "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
-                       break;
-               }
-               range.hmm_pfns[pfn_index] |= HMM_PFN_DMA_MAPPED;
        }
        /* upon success lock should stay on hold for the callee */
        if (!ret)
@@ -462,10 +413,8 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                 u64 bound)
 {
-       dma_addr_t dma;
        int idx;
        u64 addr;
-       struct ib_device *dev = umem_odp->umem.ibdev;
 
        lockdep_assert_held(&umem_odp->umem_mutex);
 
@@ -473,19 +422,19 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
        bound = min_t(u64, bound, ib_umem_end(umem_odp));
        for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
                unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
-               struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
 
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
-               dma = umem_odp->dma_list[idx];
 
                if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_VALID))
                        continue;
                if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_DMA_MAPPED))
                        continue;
 
-               ib_dma_unmap_page(dev, dma, BIT(umem_odp->page_shift),
-                                 DMA_BIDIRECTIONAL);
+               dma_hmm_unlink_page(&umem_odp->state,
+                                   &umem_odp->pfn_list[pfn_idx],
+                                   idx * (1 << umem_odp->page_shift));
                if (umem_odp->pfn_list[pfn_idx] & HMM_PFN_WRITE) {
+                       struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
                        struct page *head_page = compound_head(page);
                        /*
                         * set_page_dirty prefers being called with
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 96eda5db45456b0453b65ca8f1cd44fa3e75ef55..6e1e1d3d33cb2e40c19faa73894adbf244f0e2b4 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -164,6 +164,7 @@ static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
 {
        struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
        bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+       struct ib_device *dev = odp->umem.ibdev;
        unsigned long pfn;
        dma_addr_t pa;
        size_t i;
@@ -177,12 +178,16 @@ static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
                        /* Initial ODP init */
                        continue;
 
-               pa = odp->dma_list[idx + i];
+               pa = dma_hmm_link_page(&odp->state, &odp->pfn_list[idx + i],
+                                      (idx + i) * (1 << odp->page_shift));
+               WARN_ON_ONCE(ib_dma_mapping_error(dev, pa));
+
                pa |= MLX5_IB_MTT_READ;
                if ((pfn & HMM_PFN_WRITE) && !downgrade)
                        pa |= MLX5_IB_MTT_WRITE;
 
                pas[i] = cpu_to_be64(pa);
+               odp->npages++;
        }
 }
 
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index f99911b478c4ca9a0487cb041e7e7168a7103fbd..cb081c69fd1a4871908a76ffed7577a1cd328d49 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -18,15 +18,9 @@ struct ib_umem_odp {
        /* An array of the pfns included in the on-demand paging umem. */
        unsigned long *pfn_list;
 
-       /*
-        * An array with DMA addresses mapped for pfns in pfn_list.
-        * The lower two bits designate access permissions.
-        * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
-        */
-       dma_addr_t              *dma_list;
        struct dma_iova_state state;
        /*
-        * The umem_mutex protects the page_list and dma_list fields of an ODP
+        * The umem_mutex protects the page_list field of an ODP
         * umem, allowing only a single thread to map/unmap pages. The mutex
         * also protects access to the mmu notifier counters.
         */
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 21ec74e3cdb14c388665654c5c8de73b44a24eb0..c4880d45814730d5747d8d0afc6885283fa79ae8 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -1119,7 +1119,7 @@ dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
        struct page *page = hmm_pfn_to_page(*pfn);
        phys_addr_t phys = page_to_phys(page);
        bool coherent = dev_is_dma_coherent(dev);
-       dma_addr_t addr;
+       dma_addr_t addr = phys_to_dma(dev, phys);
        int ret;
 
        if (*pfn & HMM_PFN_DMA_MAPPED)
@@ -1134,8 +1134,7 @@ dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
                 * The DMA address calculation below is based on the fact that
                 * HMM doesn't work with swiotlb.
                 */
-               return (state->addr) ? state->addr + dma_offset :
-                                      phys_to_dma(dev, phys);
+               return (state->addr) ? state->addr + dma_offset : addr;
 
        state->range_size = dma_offset;
 
@@ -1147,8 +1146,6 @@ dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
        if (!use_dma_iommu(dev)) {
                if (!coherent)
                        arch_sync_dma_for_device(phys, PAGE_SIZE, state->dir);
-
-               addr = phys_to_dma(dev, phys);
                goto done;
        }