From 4efd58fca48c18675130589882ca529988ede443 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 5 Oct 2024 11:40:18 +0200
Subject: [PATCH] dma-mapping: change the dma_alloc_iova / dma_link_range API

Return the IOVA passed to the hardware from dma_alloc_iova, as for a
normal SGL-ish API that's all the driver needs, and stop returning a
dma_addr_t from dma_link_range, as the linked addresses are just
constant offsets from the original allocation.

Signed-off-by: Christoph Hellwig
---
 drivers/infiniband/core/umem_odp.c |  7 ++++--
 drivers/iommu/dma-iommu.c          | 15 +++++------
 drivers/vfio/pci/mlx5/cmd.c        | 40 +++++++++++++++++-------------
 include/linux/dma-mapping.h        | 24 +++++++-----------
 include/linux/iommu-dma.h          |  4 +--
 kernel/dma/mapping.c               | 25 +++++++++++--------
 6 files changed, 62 insertions(+), 53 deletions(-)

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 58fc3d4bfb73..133f00d3e125 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -54,6 +54,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 	size_t page_size = 1UL << umem_odp->page_shift;
 	unsigned long start, end;
 	size_t ndmas, npfns;
+	dma_addr_t dma_addr;
 	int ret;
 
 	umem_odp->umem.is_odp = 1;
@@ -85,9 +86,11 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 
 	dma_init_iova_state(&umem_odp->state, dev->dma_device,
 			    DMA_BIDIRECTIONAL);
-	ret = dma_alloc_iova(&umem_odp->state, end - start);
-	if (ret)
+	dma_addr = dma_alloc_iova(&umem_odp->state, 0, end - start);
+	if (dma_mapping_error(dev->dma_device, dma_addr)) {
+		ret = -ENOMEM;
 		goto out_pfn_list;
+	}
 
 	ret = mmu_interval_notifier_insert(&umem_odp->notifier,
 					   umem_odp->umem.owning_mm, start,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ce5b54dac513..c9185c96f967 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1745,23 +1745,24 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
 	return SIZE_MAX;
 }
 
-int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+dma_addr_t iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
 		size_t size)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t addr;
 
-	size = iova_align(iovad, size + iova_offset(iovad, phys));
+	size = iova_align(iovad, size + iova_off);
 	addr = __iommu_dma_alloc_iova(domain, size, dma_get_mask(state->dev),
 			state->dev);
 	if (!addr)
-		return -EINVAL;
+		return DMA_MAPPING_ERROR;
 
 	state->addr = addr;
 	state->size = size;
-	return 0;
+	return addr + iova_off;
 }
 
 void iommu_dma_free_iova(struct dma_iova_state *state)
@@ -1789,8 +1790,8 @@ void iommu_dma_end_range(struct device *dev)
 	 */
 }
 
-dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
-		size_t size, unsigned long attrs)
+int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+		size_t size, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1812,7 +1813,7 @@ dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
 		return ret;
 
 	state->range_size += size;
-	return addr + iova_off;
+	return 0;
 }
 
 static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 1cb966c094f9..d17d1a090542 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -385,29 +385,35 @@ static int register_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
 
 	WARN_ON_ONCE(state->dir == DMA_NONE);
 
-	err = dma_alloc_iova(state, npages * PAGE_SIZE);
-	if (err)
-		return err;
+	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
 
 	dma_set_iova_state(state, page_list[0], PAGE_SIZE);
-
-	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
 
 	err = dma_start_range(state);
-	if (err) {
-		dma_free_iova(state);
+	if (err)
 		return err;
-	}
-	for (i = 0; i < npages; i++) {
-		if (dma_can_use_iova(state))
-			addr = dma_link_range(state, page_to_phys(page_list[i]),
-					      PAGE_SIZE);
-		else
+
+	if (dma_can_use_iova(state)) {
+		addr = dma_alloc_iova(state, 0, npages * PAGE_SIZE);
+		if (dma_mapping_error(mdev->device, addr))
+			return -ENOMEM;
+
+		for (i = 0; i < npages; i++) {
+			err = dma_link_range(state, page_to_phys(page_list[i]),
+					     PAGE_SIZE);
+			if (err)
+				goto error;
+			*mtt++ = cpu_to_be64(addr);
+			addr += PAGE_SIZE;
+		}
+	} else {
+		for (i = 0; i < npages; i++) {
 			addr = dma_map_page(mdev->device, page_list[i], 0,
 					    PAGE_SIZE, state->dir);
-		err = dma_mapping_error(mdev->device, addr);
-		if (err)
-			goto error;
-		*mtt++ = cpu_to_be64(addr);
+			err = dma_mapping_error(mdev->device, addr);
+			if (err)
+				goto error;
+			*mtt++ = cpu_to_be64(addr);
+		}
 	}
 	dma_end_range(state);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index bfbd1092980c..3924ba73231b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -120,8 +120,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }
 
-int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
-		size_t size);
+dma_addr_t dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+		size_t size);
 void dma_free_iova(struct dma_iova_state *state);
 void dma_unlink_and_free_iova(struct dma_iova_state *state,
 		dma_addr_t dma_addr, size_t size);
@@ -183,13 +183,13 @@ void dma_set_iova_state(struct dma_iova_state *state, struct page *page,
 bool dma_can_use_iova(struct dma_iova_state *state);
 int dma_start_range(struct dma_iova_state *state);
 void dma_end_range(struct dma_iova_state *state);
-dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
-		size_t size, unsigned long attrs);
+int dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
+		size_t size, unsigned long attrs);
 #else /* CONFIG_HAS_DMA */
-static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
-					   phys_addr_t phys, size_t size)
+static inline dma_addr_t dma_alloc_iova(struct dma_iova_state *state,
+		phys_addr_t phys, size_t size)
 {
-	return -EOPNOTSUPP;
+	return DMA_MAPPING_ERROR;
 }
 static inline void dma_free_iova(struct dma_iova_state *state)
 {
@@ -353,9 +353,8 @@ static inline int dma_start_range(struct dma_iova_state *state)
 static inline void dma_end_range(struct dma_iova_state *state)
 {
 }
-static inline dma_addr_t dma_link_range_attrs(struct dma_iova_state *state,
-					      phys_addr_t phys, size_t size,
-					      unsigned long attrs)
+static inline int dma_link_range_attrs(struct dma_iova_state *state,
+		phys_addr_t phys, size_t size, unsigned long attrs)
 {
 	return DMA_MAPPING_ERROR;
 }
@@ -436,11 +435,6 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 	return false;
 }
 #endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
-static inline int dma_alloc_iova(struct dma_iova_state *state, size_t size)
-{
-	return dma_alloc_iova_unaligned(state, 0, size);
-}
-
 struct page *dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
index 90c8b4a02498..f2550efb098b 100644
--- a/include/linux/iommu-dma.h
+++ b/include/linux/iommu-dma.h
@@ -66,14 +66,14 @@ void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir);
 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir);
-int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+dma_addr_t iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
 		size_t size);
 void iommu_dma_free_iova(struct dma_iova_state *state);
 void iommu_dma_unlink_and_free_iova(struct dma_iova_state *state,
 		dma_addr_t dma_addr, size_t size);
 int iommu_dma_start_range(struct device *dev);
 void iommu_dma_end_range(struct device *dev);
-dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
 		size_t size, unsigned long attrs);
 void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 152078d0ffe2..0410aaf49dde 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -967,24 +967,29 @@ unsigned long dma_get_merge_boundary(struct device *dev)
 EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
 
 /**
- * dma_alloc_iova_unaligned - Allocate an IOVA space
+ * dma_alloc_iova - Allocate an IOVA space
  * @state: IOVA state
  * @phys: physical address
  * @size: IOVA size
  *
  * Allocate an IOVA space for the given IOVA state and size. The IOVA space
  * is allocated to the worst case when whole range is going to be used.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment. Callers that always
+ * do IOMMU granule aligned transfers can safely pass 0 here.
+ *
+ * Returns the IOVA to be used for the transfer.
  */
-int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
-		size_t size)
+dma_addr_t dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
+		size_t size)
 {
-	if (!use_dma_iommu(state->dev))
-		return 0;
-
-	WARN_ON_ONCE(!size);
+	if (WARN_ON_ONCE(!use_dma_iommu(state->dev)))
+		return DMA_MAPPING_ERROR;
+	if (WARN_ON_ONCE(!size))
+		return DMA_MAPPING_ERROR;
 	return iommu_dma_alloc_iova(state, phys, size);
 }
-EXPORT_SYMBOL_GPL(dma_alloc_iova_unaligned);
+EXPORT_SYMBOL_GPL(dma_alloc_iova);
 
 /**
  * dma_free_iova - Free an IOVA space
@@ -1085,8 +1090,8 @@ EXPORT_SYMBOL_GPL(dma_end_range);
  *
  * Link a range of IOVA space for the given IOVA state.
  */
-dma_addr_t dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
-		size_t size, unsigned long attrs)
+int dma_link_range_attrs(struct dma_iova_state *state, phys_addr_t phys,
+		size_t size, unsigned long attrs)
 {
 	return iommu_dma_link_range(state, phys, size, attrs);
 }
-- 
2.49.0
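
A minimal usage sketch of the reworked calling convention (illustrative only,
not part of the patch): dma_alloc_iova() now hands back the IOVA to program
into the hardware, dma_link_range() only reports success or failure, and the
device-visible address of the i-th linked page is simply the returned IOVA
plus i * PAGE_SIZE. The helper name example_map_pages() and its parameters are
made up for illustration; error unwinding is kept deliberately simple, see the
mlx5 hunk above for a fuller example.

#include <linux/dma-mapping.h>

/* Illustrative sketch only; everything except the dma_* calls is made up. */
static int example_map_pages(struct device *dev, struct dma_iova_state *state,
			     struct page **pages, unsigned int npages,
			     dma_addr_t *out_addr)
{
	dma_addr_t addr;
	unsigned int i;
	int err;

	dma_init_iova_state(state, dev, DMA_BIDIRECTIONAL);
	dma_set_iova_state(state, pages[0], PAGE_SIZE);

	err = dma_start_range(state);
	if (err)
		return err;

	if (!dma_can_use_iova(state)) {
		/* A real driver would fall back to dma_map_page() here. */
		err = -EOPNOTSUPP;
		goto out_end;
	}

	/* dma_alloc_iova() now returns the IOVA to program into the device. */
	addr = dma_alloc_iova(state, 0, (size_t)npages * PAGE_SIZE);
	if (dma_mapping_error(dev, addr)) {
		err = -ENOMEM;
		goto out_end;
	}

	for (i = 0; i < npages; i++) {
		/* dma_link_range() only reports success or failure ... */
		err = dma_link_range(state, page_to_phys(pages[i]), PAGE_SIZE);
		if (err)
			goto out_free;
		/* ... page i is visible to the device at addr + i * PAGE_SIZE. */
	}

	*out_addr = addr;
	err = 0;
	goto out_end;

out_free:
	/* Simplified teardown; already-linked ranges would also need unlinking. */
	dma_free_iova(state);
out_end:
	dma_end_range(state);
	return err;
}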