dma-mapping: implement DMA_ATTR_MMIO for dma_(un)map_page_attrs()
author		Leon Romanovsky <leonro@nvidia.com>
		Tue, 9 Sep 2025 13:27:37 +0000 (16:27 +0300)
committer	Marek Szyprowski <m.szyprowski@samsung.com>
		Thu, 11 Sep 2025 22:18:20 +0000 (00:18 +0200)
Make dma_map_page_attrs() and dma_unmap_page_attrs() respect
DMA_ATTR_MMIO.

DMA_ATTR_MMIO makes the functions behave the same as
dma_(un)map_resource() (see the caller-side sketch after the list):
 - No swiotlb is possible
 - Legacy dma_ops arches use ops->map_resource()
 - No kmsan
 - No arch_dma_map_phys_direct()
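
As a caller-side illustration (a minimal sketch, not part of this patch;
the device, page, and size are hypothetical, and DMA_ATTR_MMIO itself is
introduced earlier in this series), mapping MMIO-backed memory becomes an
ordinary dma_map_page_attrs() call with the new attribute:

	/* Hypothetical driver snippet, for illustration only. */
	dma_addr_t dma;

	dma = dma_map_page_attrs(dev, page, 0, size, DMA_TO_DEVICE,
				 DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... perform the DMA ... */

	dma_unmap_page_attrs(dev, dma, size, DMA_TO_DEVICE, DMA_ATTR_MMIO);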

The prior patches have made the internal functions called here
support DMA_ATTR_MMIO.

This is also preparation for turning dma_map_resource() into an inline
that calls dma_map_phys() with DMA_ATTR_MMIO, consolidating the two flows.
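
A rough sketch of that follow-up (not part of this patch; it assumes the
phys_addr_t-based dma_map_phys() entry point added earlier in this series):

	static inline dma_addr_t dma_map_resource(struct device *dev,
			phys_addr_t phys_addr, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		/* MMIO path only; no swiotlb, no kmsan, ops->map_resource() */
		return dma_map_phys(dev, phys_addr, size, dir,
				    attrs | DMA_ATTR_MMIO);
	}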

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/3660e2c78ea409d6c483a215858fb3af52cd0ed3.1757423202.git.leonro@nvidia.com
kernel/dma/mapping.c

index e47bcf7cc43d74401465ac0f6b6eef358d2c50d8..95eab531e227391c568aa89f1cd042865f6e0058 100644 (file)
@@ -158,6 +158,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
        phys_addr_t phys = page_to_phys(page) + offset;
+       bool is_mmio = attrs & DMA_ATTR_MMIO;
        dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
@@ -166,14 +167,25 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                return DMA_MAPPING_ERROR;
 
        if (dma_map_direct(dev, ops) ||
-           arch_dma_map_phys_direct(dev, phys + size))
+           (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
                addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
        else if (use_dma_iommu(dev))
                addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
-       else
+       else if (is_mmio) {
+               if (!ops->map_resource)
+                       return DMA_MAPPING_ERROR;
+
+               addr = ops->map_resource(dev, phys, size, dir, attrs);
+       } else {
+               /*
+                * The dma_ops API contract for ops->map_page() requires
+                * kmappable memory, while ops->map_resource() does not.
+                */
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
+       }
 
-       kmsan_handle_dma(phys, size, dir);
+       if (!is_mmio)
+               kmsan_handle_dma(phys, size, dir);
        trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
        debug_dma_map_phys(dev, phys, size, dir, addr, attrs);
 
@@ -185,14 +197,18 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
+       bool is_mmio = attrs & DMA_ATTR_MMIO;
 
        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops) ||
-           arch_dma_unmap_phys_direct(dev, addr + size))
+           (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
                dma_direct_unmap_phys(dev, addr, size, dir, attrs);
        else if (use_dma_iommu(dev))
                iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
-       else
+       else if (is_mmio) {
+               if (ops->unmap_resource)
+                       ops->unmap_resource(dev, addr, size, dir, attrs);
+       } else
                ops->unmap_page(dev, addr, size, dir, attrs);
        trace_dma_unmap_phys(dev, addr, size, dir, attrs);
        debug_dma_unmap_phys(dev, addr, size, dir);