dma-mapping: add a dma_unlink_and_free_iova API
author Christoph Hellwig <hch@lst.de>
Sat, 5 Oct 2024 09:21:12 +0000 (11:21 +0200)
committer Christoph Hellwig <hch@lst.de>
Sat, 5 Oct 2024 17:18:08 +0000 (19:18 +0200)
This combines the unmapping and freeing of the IOVA so that the
IOTLB gather structure and queued mode can be used for the common
case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
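
For illustration, a minimal caller-side sketch of the conversion, mirroring
the mlx5 hunk below (real callers keep their surrounding error and loop
handling):

	/* Before: unlinking and freeing were two separate steps. */
	if (dma_can_use_iova(state))
		dma_unlink_range(state);
	dma_free_iova(state);

	/* After: one call unmaps the linked range, syncs or queues the
	 * IOTLB invalidation, and frees the IOVA allocation. */
	if (dma_can_use_iova(state))
		dma_unlink_and_free_iova(state, state->addr, state->range_size);
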
drivers/iommu/dma-iommu.c
drivers/vfio/pci/mlx5/cmd.c
include/linux/dma-mapping.h
include/linux/iommu-dma.h
kernel/dma/mapping.c

index 9076b1b1d9c0abef34efaeeee71c5e4f28846271..ce5b54dac5135e57eccd2f3d7b5a26541081e470 100644
@@ -1852,6 +1852,32 @@ void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
        WARN_ON(unmapped != size);
 }
 
+void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
+               dma_addr_t dma_addr, size_t size)
+{
+       struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
+       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+       struct iommu_iotlb_gather iotlb_gather;
+       size_t unmapped;
+
+       if (WARN_ON_ONCE(dma_addr + size > state->addr + state->range_size))
+               return;
+
+       iommu_iotlb_gather_init(&iotlb_gather);
+       iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
+
+       if (!dev_is_dma_coherent(state->dev))
+               iommu_sync_dma_for_cpu(domain, dma_addr, size, state->dir);
+
+       unmapped = iommu_unmap_fast(domain, state->addr, state->range_size,
+                       &iotlb_gather);
+       WARN_ON_ONCE(unmapped != state->range_size);
+
+       if (!iotlb_gather.queued)
+               iommu_iotlb_sync(domain, &iotlb_gather);
+       __iommu_dma_free_iova(cookie, state->addr, state->size, &iotlb_gather);
+}
+
 bool iommu_can_use_iova(struct device *dev, struct page *page, size_t size,
                        enum dma_data_direction dir)
 {
index 2a846bb6d8bed0b53f64dae644be73c595ee297e..1cb966c094f959b9764c9b71dcfd3920e16042d2 100644
@@ -364,7 +364,7 @@ static void unregister_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
        WARN_ON_ONCE(state->dir == DMA_NONE);
 
        if (dma_can_use_iova(state)) {
-               dma_unlink_range(state);
+               dma_unlink_and_free_iova(state, state->addr, state->range_size);
        } else {
                mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in,
                                             klm_pas_mtt);
@@ -373,7 +373,6 @@ static void unregister_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
                        dma_unmap_page(state->dev, addr, PAGE_SIZE, state->dir);
                }
        }
-       dma_free_iova(state);
 }
 
 static int register_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
index 0638918a74f822a8cb743385c05b20646d34b763..bfbd1092980ca6d8e278facae77f4570e9f61815 100644
@@ -123,6 +123,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
                             size_t size);
 void dma_free_iova(struct dma_iova_state *state);
+void dma_unlink_and_free_iova(struct dma_iova_state *state,
+               dma_addr_t dma_addr, size_t size);
 dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
                             dma_addr_t dma_offset);
 void dma_hmm_unlink_page(struct dma_iova_state *state, unsigned long *pfn,
@@ -183,7 +185,6 @@ int dma_start_range(struct dma_iova_state *state);
 void dma_end_range(struct dma_iova_state *state);
 dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
                                size_t size, unsigned long attrs);
-void dma_unlink_range_attrs(struct dma_iova_state *state, unsigned long attrs);
 #else /* CONFIG_HAS_DMA */
 static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
                                           phys_addr_t phys, size_t size)
@@ -193,6 +194,10 @@ static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
 static inline void dma_free_iova(struct dma_iova_state *state)
 {
 }
+static inline void dma_unlink_and_free_iova(struct dma_iova_state *state,
+               dma_addr_t dma_addr, size_t size)
+{
+}
 static inline dma_addr_t dma_hmm_link_page(struct dma_iova_state *state,
                                           unsigned long *pfn,
                                           dma_addr_t dma_offset)
@@ -354,10 +359,6 @@ static inline dma_addr_t dma_link_range_attrs(struct dma_iova_state *state,
 {
        return DMA_MAPPING_ERROR;
 }
-static inline void dma_unlink_range_attrs(struct dma_iova_state *state,
-                                         unsigned long attrs)
-{
-}
 #endif /* CONFIG_HAS_DMA */
 
 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
@@ -553,7 +554,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 #define dma_link_range(d, p, o) dma_link_range_attrs(d, p, o, 0)
-#define dma_unlink_range(d) dma_unlink_range_attrs(d, 0)
 
 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
 
index 0832b115a43684eb5a789e94b903fb8ed13cb8f8..90c8b4a02498e9cbfacb66d20c8eca91546d0787 100644
@@ -69,6 +69,8 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
                size_t size);
 void iommu_dma_free_iova(struct dma_iova_state *state);
+void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
+               dma_addr_t dma_addr, size_t size);
 int iommu_dma_start_range(struct device *dev);
 void iommu_dma_end_range(struct device *dev);
 dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
index c4880d45814730d5747d8d0afc6885283fa79ae8..152078d0ffe20e05e23dc88007de68fd888ee151 100644
@@ -1001,6 +1001,15 @@ void dma_free_iova(struct dma_iova_state *state)
 }
 EXPORT_SYMBOL_GPL(dma_free_iova);
 
+void dma_unlink_and_free_iova(struct dma_iova_state *state,
+               dma_addr_t dma_addr, size_t size)
+{
+       if (!use_dma_iommu(state->dev))
+               return;
+       iommu_dma_unlink_and_free_iova(state, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(dma_unlink_and_free_iova);
+
 /**
  * dma_set_iova_state - Set the IOVA state for the given page and size
  * @state: IOVA state
@@ -1083,20 +1092,6 @@ dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
 }
 EXPORT_SYMBOL_GPL(dma_link_range_attrs);
 
-/**
- * dma_unlink_range_attrs - Unlink a range of IOVA space
- * @state: IOVA state
- * @attrs: attributes of mapping properties
- *
- * Unlink a range of IOVA space for the given IOVA state.
- */
-void dma_unlink_range_attrs(struct dma_iova_state *state, unsigned long attrs)
-{
-       iommu_dma_unlink_range(state->dev, state->addr, state->range_size,
-                              state->dir, attrs);
-}
-EXPORT_SYMBOL_GPL(dma_unlink_range_attrs);
-
 /**
  * dma_hmm_link_page - Link a physical HMM page to DMA address
  * @state: IOVA state