	} while (unmapped < size);
}
+
void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
-			    enum dma_data_direction dir, unsigned long attrs)
+			    enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
-	struct iommu_iotlb_gather iotlb_gather;
	bool coherent = dev_is_dma_coherent(dev);
	size_t unmapped;

-	iommu_iotlb_gather_init(&iotlb_gather);
-	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
-
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !coherent)
		iommu_sync_dma_for_cpu(domain, start, size, dir);

	size = iova_align(iovad, size);
-	unmapped = iommu_unmap_fast(domain, start, size, &iotlb_gather);
+	unmapped = iommu_unmap_fast(domain, start, size, NULL);
	WARN_ON(unmapped != size);
-
-	if (!iotlb_gather.queued)
-		iommu_iotlb_sync(domain, &iotlb_gather);
}

bool iommu_can_use_iova(struct device *dev, struct page *page, size_t size,