@@ ... @@ domain_get_iommu()
        int iommu_id;
 
        /* si_domain and vm domain should not get here. */
-       if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+       if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
                return NULL;
 
        for_each_domain_iommu(iommu_id, domain)
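This helper swap recurs in the hunks below; iommu_is_dma_domain() matches any DMA-API domain rather than one exact type. For reference, a minimal sketch of the core definitions this series adds in include/linux/iommu.h (bit values as in mainline at the time; verify against your tree):

/* Sketch of the include/linux/iommu.h definitions assumed here. */
#define __IOMMU_DOMAIN_PAGING   (1U << 0)  /* support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API  (1U << 1)  /* domain backs the DMA API */
#define __IOMMU_DOMAIN_PT       (1U << 2)  /* domain is identity mapped */
#define __IOMMU_DOMAIN_DMA_FQ   (1U << 3)  /* DMA API uses a flush queue */

#define IOMMU_DOMAIN_DMA        (__IOMMU_DOMAIN_PAGING | __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ     (__IOMMU_DOMAIN_PAGING |       \
                                 __IOMMU_DOMAIN_DMA_API |      \
                                 __IOMMU_DOMAIN_DMA_FQ)

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
        /* True for IOMMU_DOMAIN_DMA and IOMMU_DOMAIN_DMA_FQ alike. */
        return domain->type & __IOMMU_DOMAIN_DMA_API;
}

Testing the __IOMMU_DOMAIN_DMA_API bit instead of comparing against IOMMU_DOMAIN_DMA keeps every such check correct once flush-queue domains become a distinct type.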
@@ ... @@ pfn_to_dma_pte()
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (domain_use_first_level(domain)) {
                                pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-                               if (domain->domain.type == IOMMU_DOMAIN_DMA)
+                               if (iommu_is_dma_domain(&domain->domain))
                                        pteval |= DMA_FL_PTE_ACCESS;
                        }
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ ... @@ __domain_mapping()
        if (domain_use_first_level(domain)) {
                attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
-               if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+               if (iommu_is_dma_domain(&domain->domain)) {
                        attr |= DMA_FL_PTE_ACCESS;
                        if (prot & DMA_PTE_WRITE)
                                attr |= DMA_FL_PTE_DIRTY;
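For context on the bits being preset in the two hunks above, the first-level PTE flags (values per include/linux/intel-iommu.h of this era; treat exact positions as an assumption for other trees):

/* First-level page-table bits used above (sketch per intel-iommu.h). */
#define DMA_FL_PTE_PRESENT      BIT_ULL(0)
#define DMA_FL_PTE_US           BIT_ULL(2)   /* user/supervisor */
#define DMA_FL_PTE_ACCESS       BIT_ULL(5)   /* accessed */
#define DMA_FL_PTE_DIRTY        BIT_ULL(6)   /* dirty */
#define DMA_FL_PTE_XD           BIT_ULL(63)  /* execute disable */

Presetting Access (and Dirty for writable mappings) on DMA-API domains spares the hardware the atomic A/D-bit updates it would otherwise make on first use of the mapping.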
 
@@ ... @@ intel_iommu_domain_alloc()
        switch (type) {
        case IOMMU_DOMAIN_DMA:
+       case IOMMU_DOMAIN_DMA_FQ:
        case IOMMU_DOMAIN_UNMANAGED:
                dmar_domain = alloc_domain(0);
                if (!dmar_domain) {
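Falling through to the IOMMU_DOMAIN_DMA case is all the driver needs for the new type: flush-queue behaviour lives entirely in the common DMA layer. A simplified sketch of that core-side handling (based on 5.15's drivers/iommu/dma-iommu.c, shown for context only, not part of this patch):

/* Core-side sketch: the flush queue is set up when the DMA layer
 * initializes the domain, with a fallback to strict invalidation. */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                                 dma_addr_t limit, struct device *dev)
{
        /* ... IOVA domain setup elided ... */
        if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
                domain->type = IOMMU_DOMAIN_DMA;    /* fall back to strict */
        /* ... */
        return 0;
}

A flush-queue default domain can then be requested with iommu.strict=0 on the command line or, as of this series, by writing DMA-FQ to an iommu group's type attribute in sysfs.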
 
@@ ... @@ intel_iommu_probe_finalize()
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
-       if (domain && domain->type == IOMMU_DOMAIN_DMA)
-               iommu_setup_dma_ops(dev, 0, U64_MAX);
-       else
-               set_dma_ops(dev, NULL);
+       set_dma_ops(dev, NULL);
+       iommu_setup_dma_ops(dev, 0, U64_MAX);
 }
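The unconditional pair of calls works because iommu_setup_dma_ops() now makes the domain-type decision itself: the driver resets dev->dma_ops, and the helper installs iommu_dma_ops only for DMA-API domains. Roughly (again simplified from dma-iommu.c of the same series, for illustration):

/* Sketch of the core helper this now defers to (not this patch's code). */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        /* Identity/passthrough domains keep the direct ops reset above. */
        if (iommu_is_dma_domain(domain)) {
                if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
                        goto out_err;
                dev->dma_ops = &iommu_dma_ops;
        }
        return;
out_err:
        pr_warn("Failed to set up IOMMU for device %s - continuing with non-IOMMU ops\n",
                dev_name(dev));
}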
 
 static void intel_iommu_get_resv_regions(struct device *device,