RDMA/umem: Prevent UMEM ODP creation with SWIOTLB
author    Leon Romanovsky <leonro@nvidia.com>
          Wed, 31 Jan 2024 11:58:56 +0000 (13:58 +0200)
committer Leon Romanovsky <leon@kernel.org>
          Thu, 3 Oct 2024 16:05:53 +0000 (19:05 +0300)
RDMA UMEM has never supported DMA addresses returned from SWIOTLB, as
those addresses need to be programmed into the hardware, which is not
aware that they point to bounce buffers rather than the real ones.

Instead of silently leaving a broken system for users who are unaware
of this, be explicit and return an error to them.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
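
For context, a minimal userspace sketch of the fail-fast pattern this
patch adds to ib_init_umem_odp(): probe whether the device's DMA path
is usable at all, and return -EOPNOTSUPP before any ODP state is
allocated. fake_dev, can_use_iova() and init_odp_like() are
hypothetical stand-ins for the real ib_device, iommu_can_use_iova()
and ib_init_umem_odp(); the sketch models only the control flow, not
the kernel API.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_dev {
	bool needs_swiotlb;	/* would DMA bounce through SWIOTLB? */
};

/* Stand-in for iommu_can_use_iova(): false when DMA would bounce. */
static bool can_use_iova(const struct fake_dev *dev, size_t size)
{
	(void)size;		/* the real helper also takes size/direction */
	return !dev->needs_swiotlb;
}

static int init_odp_like(const struct fake_dev *dev, size_t len)
{
	/* Fail before allocating the pfn list or IOVA state. */
	if (!can_use_iova(dev, len))
		return -EOPNOTSUPP;

	/* ... allocate pfn list and IOVA, insert MMU notifier ... */
	return 0;
}

int main(void)
{
	struct fake_dev direct = { .needs_swiotlb = false };
	struct fake_dev bounced = { .needs_swiotlb = true };

	printf("direct:  %d\n", init_odp_like(&direct, 1 << 20));
	printf("bounced: %d\n", init_odp_like(&bounced, 1 << 20));
	return 0;
}

The real check additionally passes the page size and DMA direction
(DMA_BIDIRECTIONAL), so only mappings that would actually bounce
through SWIOTLB are refused.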
drivers/infiniband/core/umem_odp.c
drivers/iommu/dma-iommu.c

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 7bfa1e54454c1296a5f0c134d86c6e255ba76cc7..58fc3d4bfb73c8ee42f4f0ca11ccc52534b61545 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -42,7 +42,7 @@
 #include <linux/interval_tree.h>
 #include <linux/hmm.h>
 #include <linux/pagemap.h>
-
+#include <linux/iommu-dma.h>
 #include <rdma/ib_umem_odp.h>
 
 #include "uverbs.h"
@@ -51,49 +51,49 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
                                   const struct mmu_interval_notifier_ops *ops)
 {
        struct ib_device *dev = umem_odp->umem.ibdev;
+       size_t page_size = 1UL << umem_odp->page_shift;
+       unsigned long start, end;
+       size_t ndmas, npfns;
        int ret;
 
        umem_odp->umem.is_odp = 1;
        mutex_init(&umem_odp->umem_mutex);
+       if (umem_odp->is_implicit_odp)
+               return 0;
+
+       if (!iommu_can_use_iova(dev->dma_device, NULL, page_size,
+                               DMA_BIDIRECTIONAL))
+               return -EOPNOTSUPP;
+
+       start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+       if (check_add_overflow(umem_odp->umem.address,
+                              (unsigned long)umem_odp->umem.length, &end))
+               return -EOVERFLOW;
+       end = ALIGN(end, page_size);
+       if (unlikely(end < page_size))
+               return -EOVERFLOW;
+
+       ndmas = (end - start) >> umem_odp->page_shift;
+       if (!ndmas)
+               return -EINVAL;
+
+       npfns = (end - start) >> PAGE_SHIFT;
+       umem_odp->pfn_list =
+               kvcalloc(npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
+       if (!umem_odp->pfn_list)
+               return -ENOMEM;
+
+       dma_init_iova_state(&umem_odp->state, dev->dma_device,
+                           DMA_BIDIRECTIONAL);
+       ret = dma_alloc_iova(&umem_odp->state, end - start);
+       if (ret)
+               goto out_pfn_list;
 
-       if (!umem_odp->is_implicit_odp) {
-               size_t page_size = 1UL << umem_odp->page_shift;
-               unsigned long start;
-               unsigned long end;
-               size_t ndmas, npfns;
-
-               start = ALIGN_DOWN(umem_odp->umem.address, page_size);
-               if (check_add_overflow(umem_odp->umem.address,
-                                      (unsigned long)umem_odp->umem.length,
-                                      &end))
-                       return -EOVERFLOW;
-               end = ALIGN(end, page_size);
-               if (unlikely(end < page_size))
-                       return -EOVERFLOW;
-
-               ndmas = (end - start) >> umem_odp->page_shift;
-               if (!ndmas)
-                       return -EINVAL;
-
-               npfns = (end - start) >> PAGE_SHIFT;
-               umem_odp->pfn_list = kvcalloc(
-                       npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
-               if (!umem_odp->pfn_list)
-                       return -ENOMEM;
-
-
-               dma_init_iova_state(&umem_odp->state, dev->dma_device,
-                                   DMA_BIDIRECTIONAL);
-               ret = dma_alloc_iova(&umem_odp->state, end - start);
-               if (ret)
-                       goto out_pfn_list;
-
-               ret = mmu_interval_notifier_insert(&umem_odp->notifier,
-                                                  umem_odp->umem.owning_mm,
-                                                  start, end - start, ops);
-               if (ret)
-                       goto out_free_iova;
-       }
+       ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+                                          umem_odp->umem.owning_mm, start,
+                                          end - start, ops);
+       if (ret)
+               goto out_free_iova;
 
        return 0;
 
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 99af34871e9f804599a57fffefe6c33309cb14be..0483fab229ef0a4890cb91b9bdcde69462a4295c 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1883,6 +1883,7 @@ bool iommu_can_use_iova(struct device *dev, struct page *page, size_t size,
 
        return true;
 }
+EXPORT_SYMBOL_GPL(iommu_can_use_iova);
 
 void iommu_setup_dma_ops(struct device *dev)
 {