size_t page_size = 1UL << umem_odp->page_shift;
unsigned long start, end;
size_t ndmas, npfns;
+ dma_addr_t dma_addr;
int ret;
umem_odp->umem.is_odp = 1;
dma_init_iova_state(&umem_odp->state, dev->dma_device,
DMA_BIDIRECTIONAL);
- ret = dma_alloc_iova(&umem_odp->state, end - start);
- if (ret)
+ dma_addr = dma_alloc_iova(&umem_odp->state, 0, end - start);
+ if (dma_mapping_error(dev->dma_device, dma_addr)) {
+ ret = -ENOMEM;
goto out_pfn_list;
+ }
ret = mmu_interval_notifier_insert(&umem_odp->notifier,
umem_odp->umem.owning_mm, start,
return SIZE_MAX;
}
-int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+dma_addr_t iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
size_t size)
{
struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, phys);
dma_addr_t addr;
- size = iova_align(iovad, size + iova_offset(iovad, phys));
+ size = iova_align(iovad, size + iova_off);
addr = __iommu_dma_alloc_iova(domain, size, dma_get_mask(state->dev),
state->dev);
if (!addr)
- return -EINVAL;
+ return DMA_MAPPING_ERROR;
state->addr = addr;
state->size = size;
- return 0;
+ return addr + iova_off;
}
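
The returned address preserves the sub-granule offset of @phys (addr + iova_off), so an unaligned physical buffer gets an equally unaligned IOVA rather than a granule-rounded one. A worked example, assuming a 4 KiB IOVA granule and illustrative addresses:

/*
 * phys = 0x40000204  ->  iova_off = 0x204
 * size = 0x1000      ->  iova_align(0x1000 + 0x204) reserves 0x2000 of IOVA
 * __iommu_dma_alloc_iova() hands back 0x80000000
 * the caller sees 0x80000000 + 0x204 = 0x80000204
 */
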
void iommu_dma_free_iova(struct dma_iova_state *state)
*/
}
-dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
- size_t size, unsigned long attrs)
+int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
return ret;
state->range_size += size;
- return addr + iova_off;
+ return 0;
}
static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
WARN_ON_ONCE(state->dir == DMA_NONE);
- err = dma_alloc_iova(state, npages * PAGE_SIZE);
- if (err)
- return err;
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
dma_set_iova_state(state, page_list[0], PAGE_SIZE);
-
- mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
err = dma_start_range(state);
- if (err) {
- dma_free_iova(state);
+ if (err)
return err;
- }
- for (i = 0; i < npages; i++) {
- if (dma_can_use_iova(state))
- addr = dma_link_range(state, page_to_phys(page_list[i]),
- PAGE_SIZE);
- else
+
+ if (dma_can_use_iova(state)) {
+ addr = dma_alloc_iova(state, 0, npages * PAGE_SIZE);
+ if (dma_mapping_error(mdev->device, addr))
+ return -ENOMEM;
+
+ for (i = 0; i < npages; i++) {
+ err = dma_link_range(state, page_to_phys(page_list[i]),
+ PAGE_SIZE);
+ if (err)
+ goto error;
+ *mtt++ = cpu_to_be64(addr);
+ addr += PAGE_SIZE;
+ }
+ } else {
+ for (i = 0; i < npages; i++) {
addr = dma_map_page(mdev->device, page_list[i], 0,
PAGE_SIZE, state->dir);
- err = dma_mapping_error(mdev->device, addr);
- if (err)
- goto error;
- *mtt++ = cpu_to_be64(addr);
+ err = dma_mapping_error(mdev->device, addr);
+ if (err)
+ goto error;
+ *mtt++ = cpu_to_be64(addr);
+ }
}
dma_end_range(state);
return 0;
}
-int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
- size_t size);
+dma_addr_t dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size);
void dma_free_iova(struct dma_iova_state *state);
void dma_unlink_and_free_iova(struct dma_iova_state *state,
dma_addr_t dma_addr, size_t size);
bool dma_can_use_iova(struct dma_iova_state *state);
int dma_start_range(struct dma_iova_state *state);
void dma_end_range(struct dma_iova_state *state);
-dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
- size_t size, unsigned long attrs);
+int dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size, unsigned long attrs);
#else /* CONFIG_HAS_DMA */
-static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
- phys_addr_t phys, size_t size)
+static inline dma_addr_t dma_alloc_iova(struct dma_iova_state *state,
+ phys_addr_t phys, size_t size)
{
- return -EOPNOTSUPP;
+ return DMA_MAPPING_ERROR;
}
static inline void dma_free_iova(struct dma_iova_state *state)
{
static inline void dma_end_range(struct dma_iova_state *state)
{
}
-static inline dma_addr_t dma_link_range_attrs(struct dma_iova_state *state,
- phys_addr_t phys, size_t size,
- unsigned long attrs)
+static inline int dma_link_range_attrs(struct dma_iova_state *state,
+ phys_addr_t phys, size_t size, unsigned long attrs)
{
- return DMA_MAPPING_ERROR;
+ return -EOPNOTSUPP;
}
return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
-static inline int dma_alloc_iova(struct dma_iova_state *state, size_t size)
-{
- return dma_alloc_iova_unaligned(state, 0, size);
-}
-
struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
int nelems, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir);
-int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+dma_addr_t iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
size_t size);
void iommu_dma_free_iova(struct dma_iova_state *state);
void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
dma_addr_t dma_addr, size_t size);
int iommu_dma_start_range(struct device *dev);
void iommu_dma_end_range(struct device *dev);
-dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
size_t size, unsigned long attrs);
void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
enum dma_data_direction dir, unsigned long attrs);
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
/**
- * dma_alloc_iova_unaligned - Allocate an IOVA space
+ * dma_alloc_iova - Allocate an IOVA space
* @state: IOVA state
* @phys: physical address
* @size: IOVA size
*
* Allocate an IOVA space for the given IOVA state and size. The IOVA space
* is allocated to the worst case when whole range is going to be used.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment. Callers that
+ * always do IOMMU granule-aligned transfers can safely pass 0 here.
+ *
+ * Returns the IOVA to be used for the transfer, or DMA_MAPPING_ERROR on
+ * failure.
*/
-int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
- size_t size)
+dma_addr_t dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size)
{
- if (!use_dma_iommu(state->dev))
- return 0;
-
- WARN_ON_ONCE(!size);
+ if (WARN_ON_ONCE(!use_dma_iommu(state->dev)))
+ return DMA_MAPPING_ERROR;
+ if (WARN_ON_ONCE(!size))
+ return DMA_MAPPING_ERROR;
return iommu_dma_alloc_iova(state, phys, size);
}
-EXPORT_SYMBOL_GPL(dma_alloc_iova_unaligned);
+EXPORT_SYMBOL_GPL(dma_alloc_iova);
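
With dma_alloc_iova() returning the IOVA directly, the calling convention matches dma_map_page(): check the result with dma_mapping_error() instead of testing an errno. A minimal caller sketch (the helper name is illustrative, not part of this patch):

/* Sketch only: expected caller-side handling of the new return value. */
static int sketch_get_iova(struct dma_iova_state *state, phys_addr_t phys,
			   size_t size, dma_addr_t *iova)
{
	dma_addr_t addr;

	addr = dma_alloc_iova(state, phys, size);
	if (dma_mapping_error(state->dev, addr))
		return -ENOMEM;

	*iova = addr;
	return 0;
}
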
/**
* dma_free_iova - Free an IOVA space
*
* Link a range of IOVA space for the given IOVA state.
*/
-dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
- size_t size, unsigned long attrs)
+int dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size, unsigned long attrs)
{
return iommu_dma_link_range(state, phys, size, attrs);
}
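
Since dma_link_range_attrs() now reports only success or failure, callers derive per-chunk DMA addresses from the base IOVA returned by dma_alloc_iova(). A sketch of the full flow, modelled on the mlx5 hunk above and assuming dma_link_range() remains the attrs == 0 wrapper used there; the exact error unwinding (dma_free_iova() vs dma_unlink_and_free_iova()) is an assumption:

/* Sketch only: not part of this patch. */
static int sketch_link_pages(struct dma_iova_state *state, struct page **pages,
			     size_t npages, dma_addr_t *addrs)
{
	dma_addr_t base;
	size_t i;
	int err;

	err = dma_start_range(state);
	if (err)
		return err;

	base = dma_alloc_iova(state, 0, npages * PAGE_SIZE);
	if (dma_mapping_error(state->dev, base)) {
		err = -ENOMEM;
		goto out_end;
	}

	for (i = 0; i < npages; i++) {
		/* linking reports success/failure only ... */
		err = dma_link_range(state, page_to_phys(pages[i]), PAGE_SIZE);
		if (err)
			goto out_free;
		/* ... the address is the base IOVA plus the running offset */
		addrs[i] = base + i * PAGE_SIZE;
	}

	dma_end_range(state);
	return 0;

out_free:
	dma_free_iova(state);
out_end:
	dma_end_range(state);
	return err;
}
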