WARN_ON(unmapped != size);
}
+/*
+ * iommu_dma_unlink_and_free_iova - Unmap a linked DMA range and release the
+ * IOVA space backing it.
+ * @state: IOVA state carrying the device, the base IOVA (state->addr) and
+ *         the linked range length (state->range_size)
+ * @dma_addr: start of the sub-range to make visible to the CPU
+ * @size: length of that sub-range
+ *
+ * NOTE(review): only [dma_addr, dma_addr + size) is cache-synced for the
+ * CPU, while the entire [state->addr, state->addr + state->range_size)
+ * mapping is torn down below — confirm callers never expect a partial
+ * unmap from this helper.
+ */
+void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
+ dma_addr_t dma_addr, size_t size)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t unmapped;
+
+ /* Reject a range that extends past the linked region. */
+ if (WARN_ON_ONCE(dma_addr + size > state->addr + state->range_size))
+ return;
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ /* Defer the IOTLB flush when the domain uses a flush queue. */
+ iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
+
+ /* Non-coherent devices need an explicit CPU-side cache sync. */
+ if (!dev_is_dma_coherent(state->dev))
+ iommu_sync_dma_for_cpu(domain, dma_addr, size, state->dir);
+
+ unmapped = iommu_unmap_fast(domain, state->addr, state->range_size,
+ &iotlb_gather);
+ WARN_ON_ONCE(unmapped != state->range_size);
+
+ /* Flush now only if the unmap was not queued above. */
+ if (!iotlb_gather.queued)
+ iommu_iotlb_sync(domain, &iotlb_gather);
+ /*
+ * NOTE(review): frees state->size although the unmap above used
+ * state->range_size — verify these are intentionally different
+ * (allocated IOVA length vs. currently linked length).
+ */
+ __iommu_dma_free_iova(cookie, state->addr, state->size, &iotlb_gather);
+}
+
bool iommu_can_use_iova(struct device *dev, struct page *page, size_t size,
enum dma_data_direction dir)
{
WARN_ON_ONCE(state->dir == DMA_NONE);
if (dma_can_use_iova(state)) {
- dma_unlink_range(state);
+ dma_unlink_and_free_iova(state, state->addr, state->range_size);
} else {
mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in,
klm_pas_mtt);
dma_unmap_page(state->dev, addr, PAGE_SIZE, state->dir);
}
}
- dma_free_iova(state);
}
static int register_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
size_t size);
void dma_free_iova(struct dma_iova_state *state);
+void dma_unlink_and_free_iova(struct dma_iova_state *state,
+ dma_addr_t dma_addr, size_t size);
dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
dma_addr_t dma_offset);
void dma_hmm_unlink_page(struct dma_iova_state *state, unsigned long *pfn,
void dma_end_range(struct dma_iova_state *state);
dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
size_t size, unsigned long attrs);
-void dma_unlink_range_attrs(struct dma_iova_state *state, unsigned long attrs);
#else /* CONFIG_HAS_DMA */
static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
phys_addr_t phys, size_t size)
static inline void dma_free_iova(struct dma_iova_state *state)
{
}
+/* No-op stub for builds without CONFIG_HAS_DMA. */
+static inline void dma_unlink_and_free_iova(struct dma_iova_state *state,
+ dma_addr_t dma_addr, size_t size)
+{
+}
static inline dma_addr_t dma_hmm_link_page(struct dma_iova_state *state,
unsigned long *pfn,
dma_addr_t dma_offset)
{
return DMA_MAPPING_ERROR;
}
-static inline void dma_unlink_range_attrs(struct dma_iova_state *state,
- unsigned long attrs)
-{
-}
#endif /* CONFIG_HAS_DMA */
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
#define dma_link_range(d, p, o) dma_link_range_attrs(d, p, o, 0)
-#define dma_unlink_range(d) dma_unlink_range_attrs(d, 0)
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
size_t size);
void iommu_dma_free_iova(struct dma_iova_state *state);
+void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
+ dma_addr_t dma_addr, size_t size);
int iommu_dma_start_range(struct device *dev);
void iommu_dma_end_range(struct device *dev);
dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
}
EXPORT_SYMBOL_GPL(dma_free_iova);
+/**
+ * dma_unlink_and_free_iova - Unlink a DMA range and free its IOVA space
+ * @state: IOVA state describing the linked range
+ * @dma_addr: DMA address of the range to unlink
+ * @size: size of the range
+ *
+ * NOTE(review): returns silently for devices not using IOMMU-backed DMA;
+ * presumably the direct-mapping path is torn down by the caller (e.g. via
+ * dma_unmap_page) — confirm against call sites.
+ */
+void dma_unlink_and_free_iova(struct dma_iova_state *state,
+ dma_addr_t dma_addr, size_t size)
+{
+ if (!use_dma_iommu(state->dev))
+ return;
+ iommu_dma_unlink_and_free_iova(state, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(dma_unlink_and_free_iova);
+
/**
* dma_set_iova_state - Set the IOVA state for the given page and size
* @state: IOVA state
}
EXPORT_SYMBOL_GPL(dma_link_range_attrs);
-/**
- * dma_unlink_range_attrs - Unlink a range of IOVA space
- * @state: IOVA state
- * @attrs: attributes of mapping properties
- *
- * Unlink a range of IOVA space for the given IOVA state.
- */
-void dma_unlink_range_attrs(struct dma_iova_state *state, unsigned long attrs)
-{
- iommu_dma_unlink_range(state->dev, state->addr, state->range_size,
- state->dir, attrs);
-}
-EXPORT_SYMBOL_GPL(dma_unlink_range_attrs);
-
/**
* dma_hmm_link_page - Link a physical HMM page to DMA address
* @state: IOVA state