www.infradead.org Git - users/hch/block.git/commitdiff
iommu/dma: don't use queued mode in iommu_dma_unlink_range
author: Christoph Hellwig <hch@lst.de>
Sat, 5 Oct 2024 09:06:42 +0000 (11:06 +0200)
committer: Christoph Hellwig <hch@lst.de>
Sat, 5 Oct 2024 16:14:39 +0000 (18:14 +0200)
For queued mode to work, the same iotlb_gather must be used for the
unmapping and freeing of the iova.

Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/iommu/dma-iommu.c

index a01a5d9d7ea57bb83840a99ca85156e6079b59d9..dee027833391b2b0390d4b83405006bec57a0b8c 100644 (file)
@@ -1834,28 +1834,22 @@ static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
        } while (unmapped < size);
 }
 
+
 void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
-                           enum dma_data_direction dir, unsigned long attrs)
+               enum dma_data_direction dir, unsigned long attrs)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
-       struct iommu_iotlb_gather iotlb_gather;
        bool coherent = dev_is_dma_coherent(dev);
        size_t unmapped;
 
-       iommu_iotlb_gather_init(&iotlb_gather);
-       iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
-
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !coherent)
                iommu_sync_dma_for_cpu(domain, start, size, dir);
 
        size = iova_align(iovad, size);
-       unmapped = iommu_unmap_fast(domain, start, size, &iotlb_gather);
+       unmapped = iommu_unmap_fast(domain, start, size, NULL);
        WARN_ON(unmapped != size);
-
-       if (!iotlb_gather.queued)
-               iommu_iotlb_sync(domain, &iotlb_gather);
 }
 
 bool iommu_can_use_iova(struct device *dev, struct page *page, size_t size,