                len = (j - i) << PAGE_SHIFT;
                ret = iommu_map(mapping->domain, iova, phys, len,
-                               __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
+                               __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
+                               GFP_KERNEL);
                if (ret < 0)
                        goto fail;
                iova += len;
 
                prot = __dma_info_to_prot(dir, attrs);
 
-               ret = iommu_map(mapping->domain, iova, phys, len, prot);
+               ret = iommu_map(mapping->domain, iova, phys, len, prot,
+                               GFP_KERNEL);
                if (ret < 0)
                        goto fail;
                count += len >> PAGE_SHIFT;
 
        prot = __dma_info_to_prot(dir, attrs);
 
-       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
+                       prot, GFP_KERNEL);
        if (ret < 0)
                goto fail;
 
 
        prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
 
-       ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+       ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
        if (ret < 0)
                goto fail;
 
 
                u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
                ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
-                               PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+                               PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
+                               GFP_KERNEL);
                if (ret < 0) {
                        nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
 
 
        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
-                       size, IOMMU_READ | IOMMU_WRITE);
+                       size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (err < 0)
                goto free_iova;
 
 
 
                pb->dma = iova_dma_addr(&host1x->iova, alloc);
                err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
-                               IOMMU_READ);
+                               IOMMU_READ, GFP_KERNEL);
                if (err)
                        goto iommu_free_iova;
        } else {
 
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
                                        va_start, &pa_start, size, flags);
                                err = iommu_map(pd->domain, va_start, pa_start,
-                                                       size, flags);
+                                               size, flags, GFP_ATOMIC);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
                                usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
                                        va_start, &pa_start, size, flags);
                                err = iommu_map(pd->domain, va_start, pa_start,
-                                               size, flags);
+                                               size, flags, GFP_ATOMIC);
                                if (err) {
                                        usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                                                va_start, &pa_start, size, err);
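These usnic call sites run with the pd spinlock held, so neither may block: the gfp must be GFP_ATOMIC, as GFP_KERNEL would trip the might_sleep_if() check added to iommu_map() below. A minimal sketch of the constraint, assuming a hypothetical caller in atomic context (not the driver's actual code):

/* Hypothetical: map a range while holding a spinlock, as usnic does.
 * Blocking is forbidden here, so the gfp argument must be GFP_ATOMIC. */
static int map_range_atomic(struct iommu_domain *domain, spinlock_t *lock,
                            unsigned long va, phys_addr_t pa, size_t size,
                            int flags)
{
        int err;

        spin_lock(lock);
        err = iommu_map(domain, va, pa, size, flags, GFP_ATOMIC);
        spin_unlock(lock);
        return err;
}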
 
        if (!iova)
                goto out_free_page;
 
-       if (iommu_map(domain, iova, msi_addr, size, prot))
+       if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
                goto out_free_iova;
 
        INIT_LIST_HEAD(&msi_page->list);
 
                        if (map_size) {
                                ret = iommu_map(domain, addr - map_size,
                                                addr - map_size, map_size,
-                                               entry->prot);
+                                               entry->prot, GFP_KERNEL);
                                if (ret)
                                        goto out;
                                map_size = 0;
        return ret;
 }
 
-static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
-                     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        const struct iommu_domain_ops *ops = domain->ops;
        int ret;
 
+       might_sleep_if(gfpflags_allow_blocking(gfp));
+
+       /* Discourage passing strange GFP flags */
+       if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+                               __GFP_HIGHMEM)))
+               return -EINVAL;
+
        ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
        if (ret == 0 && ops->iotlb_sync_map)
                ops->iotlb_sync_map(domain, iova, size);
 
        return ret;
 }
-
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot)
-{
-       might_sleep();
-       return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
-}
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
 {
-       return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+       return iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(iommu_map_atomic);
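Folding _iommu_map() into iommu_map() makes every caller state its allocation context explicitly, and the function now polices the flags itself: the gfp controls only how the IOMMU driver allocates its page-table memory, so zone and placement modifiers such as __GFP_HIGHMEM are meaningless here and fail with -EINVAL. A short sketch of the resulting calling convention (hypothetical call sites):

/* Hypothetical process-context caller: blocking is allowed, pass GFP_KERNEL. */
static int example_map_sleeping(struct iommu_domain *domain, unsigned long iova,
                                phys_addr_t paddr, size_t size)
{
        return iommu_map(domain, iova, paddr, size,
                         IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
}

/* A zone modifier would WARN once and fail:
 *      iommu_map(domain, iova, paddr, size, prot,
 *                GFP_KERNEL | __GFP_HIGHMEM);   returns -EINVAL
 */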
 
 
                        size % PAGE_SIZE);
 
        while (size) {
-               rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
+               rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
+                              GFP_KERNEL);
                if (rc)
                        goto err_unmap;
                iova += PAGE_SIZE;
                else
                        rc = iommu_map(domain, iova,
                                       PFN_PHYS(batch->pfns[cur]) + page_offset,
-                                      next_iova - iova, area->iommu_prot);
+                                      next_iova - iova, area->iommu_prot,
+                                      GFP_KERNEL);
                if (rc)
                        goto err_unmap;
                iova = next_iova;
 
        core->fw.mapped_mem_size = mem_size;
 
        ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
+                       IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
        if (ret) {
                dev_err(dev, "could not map video firmware region\n");
                return ret;
 
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;    /* We just want a direct mapping */
 
-       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+                       GFP_KERNEL);
        if (ret)
                return ret;
 
        size = PAGE_ALIGN(size + addr - phys);
        iova = phys;    /* We just want a direct mapping */
 
-       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+       ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+                       GFP_KERNEL);
        if (ret)
                return ret;
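These two near-identical hunks use the direct-mapping idiom: round the start down to a page boundary, round the length up to cover the tail, and choose the IOVA equal to the physical address so the device sees the same addresses as the CPU. A condensed sketch of the pattern, as a hypothetical helper:

/* Hypothetical helper mirroring the call sites above: identity-map
 * [addr, addr + size) so device and CPU addresses coincide. */
static int direct_map_range(struct iommu_domain *domain, phys_addr_t addr,
                            size_t size)
{
        phys_addr_t phys = addr & PAGE_MASK;    /* round start down */

        size = PAGE_ALIGN(size + addr - phys);  /* round length up */

        /* iova == phys: a direct mapping */
        return iommu_map(domain, phys, phys, size,
                         IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
}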
 
 
 
        ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
                        ar->msa.paddr, ar->msa.mem_size,
-                       IOMMU_READ | IOMMU_WRITE);
+                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret) {
                ath10k_err(ar, "failed to map firmware region: %d\n", ret);
                goto err_iommu_detach;
 
 
        ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
                        ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
-                       IOMMU_READ | IOMMU_WRITE);
+                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret) {
                ath11k_err(ab, "failed to map firmware region: %d\n", ret);
                goto err_iommu_detach;
 
        ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
                        ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
-                       IOMMU_READ | IOMMU_WRITE);
+                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret) {
                ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
                goto err_iommu_unmap;
 
        if (!mapping)
                return -ENOMEM;
 
-       ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+       ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags,
+                       GFP_KERNEL);
        if (ret) {
                dev_err(dev, "failed to map devmem: %d\n", ret);
                goto out;
                }
 
                ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
-                               mem->flags);
+                               mem->flags, GFP_KERNEL);
                if (ret) {
                        dev_err(dev, "iommu_map failed: %d\n", ret);
                        goto free_mapping;
 
 
        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
-                               npage << PAGE_SHIFT, prot | IOMMU_CACHE);
+                               npage << PAGE_SHIFT, prot | IOMMU_CACHE,
+                               GFP_KERNEL);
                if (ret)
                        goto unwind;
 
                                size = npage << PAGE_SHIFT;
                        }
 
-                       ret = iommu_map(domain->domain, iova, phys,
-                                       size, dma->prot | IOMMU_CACHE);
+                       ret = iommu_map(domain->domain, iova, phys, size,
+                                       dma->prot | IOMMU_CACHE, GFP_KERNEL);
                        if (ret) {
                                if (!dma->iommu_mapped) {
                                        vfio_unpin_pages_remote(dma, iova,
                return;
 
        ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
        if (!ret) {
                size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
 
 
                        r = ops->set_map(vdpa, asid, iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
-                             perm_to_iommu_flags(perm));
+                             perm_to_iommu_flags(perm), GFP_KERNEL);
        }
        if (r) {
                vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
 
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
-                    phys_addr_t paddr, size_t size, int prot);
+                    phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
 extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 }
 
 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
-                           phys_addr_t paddr, size_t size, int prot)
+                           phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        return -ENODEV;
 }
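The !CONFIG_IOMMU_API stub gains the same gfp parameter so callers compile unchanged whether or not the API is built in; with the stub they simply see -ENODEV at runtime. A sketch of a caller that tolerates both configurations (hypothetical function and fallback):

/* Hypothetical caller: builds against both the extern declaration and the
 * inline stub, which now share the six-argument signature. */
static int try_direct_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size)
{
        int ret = iommu_map(domain, iova, paddr, size,
                            IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

        if (ret == -ENODEV)     /* CONFIG_IOMMU_API=n: stub above */
                return 0;       /* hypothetical fallback: run without IOMMU */
        return ret;
}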