iommu/dma: Factor out a iommu_dma_map_swiotlb helper
author     Christoph Hellwig <hch@lst.de>
           Mon, 5 May 2025 07:01:43 +0000 (10:01 +0300)
committer  Marek Szyprowski <m.szyprowski@samsung.com>
           Tue, 6 May 2025 06:36:53 +0000 (08:36 +0200)
Split the swiotlb bounce-buffering logic out of iommu_dma_map_page into a
separate helper. This not only keeps the code neatly separated, but will
also allow the helper to be reused by another caller.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
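
The new helper keeps the whole bounce-buffer path behind a single call that
takes a physical address, size, direction and attrs, and returns either the
bounce-buffer address or (phys_addr_t)DMA_MAPPING_ERROR. The commit does not
name the second caller, so the fragment below is only a hypothetical sketch of
how such a caller could use the helper; the function name
iommu_dma_maybe_bounce and its placement are illustrative assumptions, not
part of the patch.

static phys_addr_t iommu_dma_maybe_bounce(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iova_domain *iovad = &domain->iova_cookie->iovad;

	/*
	 * Hypothetical caller, for illustration only: buffers already
	 * aligned to the IOVA granule are passed through unchanged,
	 * anything unaligned is bounced through swiotlb by the new helper.
	 */
	if (!dev_use_swiotlb(dev, size, dir) || !iova_offset(iovad, phys | size))
		return phys;

	return iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
}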
drivers/iommu/dma-iommu.c

index 6ca9305a26cc7401ebd6f3b68ff8be1e64c5b178..d2c298083e0aab2cb8f53c42107639f1fb4e0dbf 100644
@@ -1138,6 +1138,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+       struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+       if (!is_swiotlb_active(dev)) {
+               dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+               return (phys_addr_t)DMA_MAPPING_ERROR;
+       }
+
+       trace_swiotlb_bounced(dev, phys, size);
+
+       phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+                       attrs);
+
+       /*
+        * Untrusted devices should not see padding areas with random leftover
+        * kernel data, so zero the pre- and post-padding.
+        * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+        * the contents of the original memory buffer.
+        */
+       if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+               size_t start, virt = (size_t)phys_to_virt(phys);
+
+               /* Pre-padding */
+               start = iova_align_down(iovad, virt);
+               memset((void *)start, 0, virt - start);
+
+               /* Post-padding */
+               start = virt + size;
+               memset((void *)start, 0, iova_align(iovad, start) - start);
+       }
+
+       return phys;
+}
+
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size, enum dma_data_direction dir,
              unsigned long attrs)
@@ -1151,42 +1188,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
        dma_addr_t iova, dma_mask = dma_get_mask(dev);
 
        /*
-        * If both the physical buffer start address and size are
-        * page aligned, we don't need to use a bounce page.
+        * If both the physical buffer start address and size are page aligned,
+        * we don't need to use a bounce page.
         */
        if (dev_use_swiotlb(dev, size, dir) &&
            iova_offset(iovad, phys | size)) {
-               if (!is_swiotlb_active(dev)) {
-                       dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
-                       return DMA_MAPPING_ERROR;
-               }
-
-               trace_swiotlb_bounced(dev, phys, size);
-
-               phys = swiotlb_tbl_map_single(dev, phys, size,
-                                             iova_mask(iovad), dir, attrs);
-
-               if (phys == DMA_MAPPING_ERROR)
+               phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+               if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
                        return DMA_MAPPING_ERROR;
-
-               /*
-                * Untrusted devices should not see padding areas with random
-                * leftover kernel data, so zero the pre- and post-padding.
-                * swiotlb_tbl_map_single() has initialized the bounce buffer
-                * proper to the contents of the original memory buffer.
-                */
-               if (dev_is_untrusted(dev)) {
-                       size_t start, virt = (size_t)phys_to_virt(phys);
-
-                       /* Pre-padding */
-                       start = iova_align_down(iovad, virt);
-                       memset((void *)start, 0, virt - start);
-
-                       /* Post-padding */
-                       start = virt + size;
-                       memset((void *)start, 0,
-                              iova_align(iovad, start) - start);
-               }
        }
 
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
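
The pre- and post-padding zeroing in the new helper works on IOVA granule
boundaries: everything from the granule-aligned start of the bounce slot up to
the start of the data, and everything from the end of the data up to the next
granule boundary, is cleared. Below is a small standalone sketch of the same
arithmetic; the fixed 4 KiB granule and the align_down()/align_up() stand-ins
for the kernel's iova_align_down()/iova_align() are assumptions made purely
for illustration.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for iovad->granule; assumed 4 KiB here for illustration only. */
#define GRANULE 4096u

static uintptr_t align_down(uintptr_t addr)
{
	return addr & ~(uintptr_t)(GRANULE - 1);
}

static uintptr_t align_up(uintptr_t addr)
{
	return (addr + GRANULE - 1) & ~(uintptr_t)(GRANULE - 1);
}

int main(void)
{
	uintptr_t virt = 0x1000200;	/* unaligned start of the bounced data */
	size_t size = 3000;		/* unaligned transfer length */

	/* Pre-padding: from the granule boundary below virt up to virt. */
	size_t pre = virt - align_down(virt);
	/* Post-padding: from the end of the data up to the next boundary. */
	size_t post = align_up(virt + size) - (virt + size);

	/* Prints: zero 512 bytes before and 584 bytes after the data */
	printf("zero %zu bytes before and %zu bytes after the data\n", pre, post);
	return 0;
}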