{
const struct dma_map_ops *ops = get_dma_ops(dev);
phys_addr_t phys = page_to_phys(page) + offset;
+ bool is_mmio = attrs & DMA_ATTR_MMIO;
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
if (WARN_ON_ONCE(!dev->dma_mask))
	return DMA_MAPPING_ERROR;
if (dma_map_direct(dev, ops) ||
- arch_dma_map_phys_direct(dev, phys + size))
+ (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
else if (use_dma_iommu(dev))
addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
- else
+ else if (is_mmio) {
+ if (!ops->map_resource)
+ return DMA_MAPPING_ERROR;
+
+ addr = ops->map_resource(dev, phys, size, dir, attrs);
+ } else {
+ /*
+ * The dma_ops API contract for ops->map_page() requires
+ * kmappable memory, while ops->map_resource() does not.
+ */
addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ }
- kmsan_handle_dma(phys, size, dir);
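+ /* MMIO regions are not backed by kernel memory, so there is nothing for KMSAN to track. */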
+ if (!is_mmio)
+ kmsan_handle_dma(phys, size, dir);
trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
debug_dma_map_phys(dev, phys, size, dir, addr, attrs);

return addr;
}
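For context, a minimal caller sketch (not part of this patch, and assuming the phys-based wrappers dma_map_phys()/dma_unmap_phys() exported elsewhere in this series): with DMA_ATTR_MMIO set, the request is routed to ops->map_resource() and the struct page / KMSAN handling above is skipped, so a driver can hand a raw MMIO physical address (here an arbitrary PCI BAR 0, picked purely for illustration) straight to the DMA API and check the result with dma_mapping_error() as usual.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static dma_addr_t example_map_peer_bar(struct pci_dev *pdev, size_t size)
{
	/* Arbitrary BAR 0, for illustration only. */
	phys_addr_t bar_phys = pci_resource_start(pdev, 0);

	/* DMA_ATTR_MMIO: no struct page, no KMSAN, ops->map_resource() path. */
	return dma_map_phys(&pdev->dev, bar_phys, size,
			    DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);
}

/* The unmap side must pass the same attrs so it takes the MMIO branch too. */
static void example_unmap_peer_bar(struct pci_dev *pdev, dma_addr_t dma,
				   size_t size)
{
	dma_unmap_phys(&pdev->dev, dma, size, DMA_BIDIRECTIONAL,
		       DMA_ATTR_MMIO);
}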
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
+ bool is_mmio = attrs & DMA_ATTR_MMIO;
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops) ||
- arch_dma_unmap_phys_direct(dev, addr + size))
+ (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
dma_direct_unmap_phys(dev, addr, size, dir, attrs);
else if (use_dma_iommu(dev))
iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
- else
+ else if (is_mmio) {
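+ /* ops->unmap_resource() is optional; when it is absent the MMIO unmap is a no-op. */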
+ if (ops->unmap_resource)
+ ops->unmap_resource(dev, addr, size, dir, attrs);
+ } else
ops->unmap_page(dev, addr, size, dir, attrs);
trace_dma_unmap_phys(dev, addr, size, dir, attrs);
debug_dma_unmap_phys(dev, addr, size, dir);