dma-mapping: change the dma_alloc_iova / dma_link_range API
author    Christoph Hellwig <hch@lst.de>
          Sat, 5 Oct 2024 09:40:18 +0000 (11:40 +0200)
committer Christoph Hellwig <hch@lst.de>
          Sat, 5 Oct 2024 17:19:32 +0000 (19:19 +0200)
Return the IOVA passed to the hardware from dma_alloc_iova, as for a
normal SGL-ish API that is all the driver needs, and stop returning a
dma_addr_t from dma_link_range, as each linked range sits at a constant
offset from the original allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/infiniband/core/umem_odp.c
drivers/iommu/dma-iommu.c
drivers/vfio/pci/mlx5/cmd.c
include/linux/dma-mapping.h
include/linux/iommu-dma.h
kernel/dma/mapping.c
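
A minimal sketch of the new calling convention from a driver's point of
view (hypothetical caller code, not part of this commit; "dev", "state",
"pages" and "npages" are placeholder names):

	dma_addr_t addr;
	int i, err;

	/* now returns the IOVA to program into the hardware */
	addr = dma_alloc_iova(&state, 0, npages * PAGE_SIZE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		/* now returns 0 or a negative errno, not a dma_addr_t */
		err = dma_link_range(&state, page_to_phys(pages[i]),
				     PAGE_SIZE);
		if (err)
			goto err_free_iova;
	}
	/* page i is addressable at addr + i * PAGE_SIZE */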

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 58fc3d4bfb73c8ee42f4f0ca11ccc52534b61545..133f00d3e1256ac99703de32960411018de4019d 100644
@@ -54,6 +54,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
        size_t page_size = 1UL << umem_odp->page_shift;
        unsigned long start, end;
        size_t ndmas, npfns;
+       dma_addr_t dma_addr;
        int ret;
 
        umem_odp->umem.is_odp = 1;
@@ -85,9 +86,11 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 
        dma_init_iova_state(&umem_odp->state, dev->dma_device,
                            DMA_BIDIRECTIONAL);
-       ret = dma_alloc_iova(&umem_odp->state, end - start);
-       if (ret)
+       dma_addr = dma_alloc_iova(&umem_odp->state, 0, end - start);
+       if (dma_mapping_error(dev->dma_device, dma_addr)) {
+               ret = -ENOMEM;
                goto out_pfn_list;
+       }
 
        ret = mmu_interval_notifier_insert(&umem_odp->notifier,
                                           umem_odp->umem.owning_mm, start,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ce5b54dac5135e57eccd2f3d7b5a26541081e470..c9185c96f967210d22818195e1802b3c5863a7a2 100644
@@ -1745,23 +1745,24 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
        return SIZE_MAX;
 }
 
-int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+dma_addr_t iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
                         size_t size)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
+       size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t addr;
 
-       size = iova_align(iovad, size + iova_offset(iovad, phys));
+       size = iova_align(iovad, size + iova_off);
        addr = __iommu_dma_alloc_iova(domain, size, dma_get_mask(state->dev),
                                      state->dev);
        if (!addr)
-               return -EINVAL;
+               return DMA_MAPPING_ERROR;
 
        state->addr = addr;
        state->size = size;
-       return 0;
+       return addr + iova_off;
 }
 
 void iommu_dma_free_iova(struct dma_iova_state *state)
@@ -1789,8 +1790,8 @@ void iommu_dma_end_range(struct device *dev)
         */
 }
 
-dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
-                               size_t size, unsigned long attrs)
+int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+               size_t size, unsigned long attrs)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1812,7 +1813,7 @@ dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
                return ret;
 
        state->range_size += size;
-       return addr + iova_off;
+       return 0;
 }
 
 static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 1cb966c094f959b9764c9b71dcfd3920e16042d2..d17d1a09054288ca5ca3932891fe7d46c607fdba 100644
@@ -385,29 +385,35 @@ static int register_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
 
        WARN_ON_ONCE(state->dir == DMA_NONE);
 
-       err = dma_alloc_iova(state, npages * PAGE_SIZE);
-       if (err)
-               return err;
+       mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
 
        dma_set_iova_state(state, page_list[0], PAGE_SIZE);
-
-       mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
        err = dma_start_range(state);
-       if (err) {
-               dma_free_iova(state);
+       if (err)
                return err;
-       }
-       for (i = 0; i < npages; i++) {
-               if (dma_can_use_iova(state))
-                       addr = dma_link_range(state, page_to_phys(page_list[i]),
-                                             PAGE_SIZE);
-               else
+
+       if (dma_can_use_iova(state)) {
+               addr = dma_alloc_iova(state, 0, npages * PAGE_SIZE);
+               if (dma_mapping_error(mdev->device, addr))
+                       return -ENOMEM;
+
+               for (i = 0; i < npages; i++) {
+                       err = dma_link_range(state, page_to_phys(page_list[i]),
+                                       PAGE_SIZE);
+                       if (err)
+                               goto error;
+                       *mtt++ = cpu_to_be64(addr);
+                       addr += PAGE_SIZE;
+               }
+       } else {
+               for (i = 0; i < npages; i++) {
                        addr = dma_map_page(mdev->device, page_list[i], 0,
                                            PAGE_SIZE, state->dir);
-               err = dma_mapping_error(mdev->device, addr);
-               if (err)
-                       goto error;
-               *mtt++ = cpu_to_be64(addr);
+                       err = dma_mapping_error(mdev->device, addr);
+                       if (err)
+                               goto error;
+                       *mtt++ = cpu_to_be64(addr);
+               }
        }
        dma_end_range(state);
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index bfbd1092980ca6d8e278facae77f4570e9f61815..3924ba73231b6d9199c2c57bd46f555d80383621 100644
@@ -120,8 +120,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
        return 0;
 }
 
-int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
-                            size_t size);
+dma_addr_t dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+               size_t size);
 void dma_free_iova(struct dma_iova_state *state);
 void dma_unlink_and_free_iova(struct dma_iova_state *state,
                dma_addr_t dma_addr, size_t size);
@@ -183,13 +183,13 @@ void dma_set_iova_state(struct dma_iova_state *state, struct page *page,
 bool dma_can_use_iova(struct dma_iova_state *state);
 int dma_start_range(struct dma_iova_state *state);
 void dma_end_range(struct dma_iova_state *state);
-dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
-                               size_t size, unsigned long attrs);
+int dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
+               size_t size, unsigned long attrs);
 #else /* CONFIG_HAS_DMA */
-static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
-                                          phys_addr_t phys, size_t size)
+static inline dma_addr_t dma_alloc_iova(struct dma_iova_state *state,
+               phys_addr_t phys, size_t size)
 {
-       return -EOPNOTSUPP;
+       return DMA_MAPPING_ERROR;
 }
 static inline void dma_free_iova(struct dma_iova_state *state)
 {
@@ -353,9 +353,8 @@ static inline int dma_start_range(struct dma_iova_state *state)
 static inline void dma_end_range(struct dma_iova_state *state)
 {
 }
-static inline dma_addr_t dma_link_range_attrs(struct dma_iova_state *state,
-                                             phys_addr_t phys, size_t size,
-                                             unsigned long attrs)
+static inline int dma_link_range_attrs(struct dma_iova_state *state,
+               phys_addr_t phys, size_t size, unsigned long attrs)
 {
-       return DMA_MAPPING_ERROR;
+       return -EOPNOTSUPP;
 }
@@ -436,11 +435,6 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
        return false;
 }
 #endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
-static inline int dma_alloc_iova(struct dma_iova_state *state, size_t size)
-{
-       return dma_alloc_iova_unaligned(state, 0, size);
-}
-
 struct page *dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
index 90c8b4a02498e9cbfacb66d20c8eca91546d0787..f2550efb098bcaee8ff25d68c0db7b5033f41ecb 100644
@@ -66,14 +66,14 @@ void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir);
 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir);
-int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+dma_addr_t iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
                size_t size);
 void iommu_dma_free_iova(struct dma_iova_state *state);
 void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
                dma_addr_t dma_addr, size_t size);
 int iommu_dma_start_range(struct device *dev);
 void iommu_dma_end_range(struct device *dev);
-dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
                size_t size, unsigned long attrs);
 void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
                enum dma_data_direction dir, unsigned long attrs);
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 152078d0ffe20e05e23dc88007de68fd888ee151..0410aaf49dde0846d8f2d70903361123565cb881 100644
@@ -967,24 +967,29 @@ unsigned long dma_get_merge_boundary(struct device *dev)
 EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
 
 /**
- * dma_alloc_iova_unaligned - Allocate an IOVA space
+ * dma_alloc_iova - Allocate an IOVA space
  * @state: IOVA state
  * @phys: physical address
  * @size: IOVA size
  *
  * Allocate an IOVA space for the given IOVA state and size. The IOVA space
  * is sized for the worst case, i.e. as if the whole range will be used.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment.  Callers that always
+ * do IOMMU granule aligned transfers can safely pass 0 here.
+ *
+ * Returns the IOVA to be used for the transfer.
  */
-int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
-                            size_t size)
+dma_addr_t dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+               size_t size)
 {
-       if (!use_dma_iommu(state->dev))
-               return 0;
-
-       WARN_ON_ONCE(!size);
+       if (WARN_ON_ONCE(!use_dma_iommu(state->dev)))
+               return DMA_MAPPING_ERROR;
+       if (WARN_ON_ONCE(!size))
+               return DMA_MAPPING_ERROR;
        return iommu_dma_alloc_iova(state, phys, size);
 }
-EXPORT_SYMBOL_GPL(dma_alloc_iova_unaligned);
+EXPORT_SYMBOL_GPL(dma_alloc_iova);
 
 /**
  * dma_free_iova - Free an IOVA space
@@ -1085,8 +1090,8 @@ EXPORT_SYMBOL_GPL(dma_end_range);
  *
  * Link a range of IOVA space for the given IOVA state.
  */
-dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
-                               size_t size, unsigned long attrs)
+int dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
+               size_t size, unsigned long attrs)
 {
        return iommu_dma_link_range(state, phys, size, attrs);
 }
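
Note on @phys: as the iommu_dma_alloc_iova() hunk above shows, the returned
IOVA keeps the caller's offset within the IOMMU granule (addr + iova_off).
A sketch of a hypothetical caller with a buffer that is not granule aligned
(placeholder names, not from the commit):

	phys_addr_t phys = virt_to_phys(buf);
	dma_addr_t addr;

	/* @phys only influences alignment; the returned IOVA carries
	 * the same sub-granule offset as phys */
	addr = dma_alloc_iova(&state, phys, len);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;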