return acpihid_device_group(dev);
 }
 
-static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
-               enum iommu_attr attr, void *data)
-{
-       switch (domain->type) {
-       case IOMMU_DOMAIN_UNMANAGED:
-               return -ENODEV;
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       *(int *)data = !amd_iommu_unmap_flush;
-                       return 0;
-               default:
-                       return -ENODEV;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-}
-
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
                pr_info("IO/TLB flush on unmap enabled\n");
        else
                pr_info("Lazy IO/TLB flushing enabled\n");
-
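+       /* Tell the IOMMU core whether unmaps must be flushed synchronously */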
+       iommu_set_dma_strict(amd_iommu_unmap_flush);
        return 0;
 
 }
        .release_device = amd_iommu_release_device,
        .probe_finalize = amd_iommu_probe_finalize,
        .device_group = amd_iommu_device_group,
-       .domain_get_attr = amd_iommu_domain_get_attr,
        .get_resv_regions = amd_iommu_get_resv_regions,
        .put_resv_regions = generic_iommu_put_resv_regions,
        .is_attach_deferred = amd_iommu_is_attach_deferred,
 
                .iommu_dev      = smmu->dev,
        };
 
-       if (smmu_domain->non_strict)
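+       /* Defer TLB invalidation to the flush queue when lazy unmaps are allowed */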
+       if (!iommu_get_dma_strict(domain))
                pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
 
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        return group;
 }
 
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
-                                   enum iommu_attr attr, void *data)
-{
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-       switch (domain->type) {
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       *(int *)data = smmu_domain->non_strict;
-                       return 0;
-               default:
-                       return -ENODEV;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
-                                   enum iommu_attr attr, void *data)
-{
-       int ret = 0;
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-       mutex_lock(&smmu_domain->init_mutex);
-
-       switch (domain->type) {
-       case IOMMU_DOMAIN_DMA:
-               switch(attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       smmu_domain->non_strict = *(int *)data;
-                       break;
-               default:
-                       ret = -ENODEV;
-               }
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       mutex_unlock(&smmu_domain->init_mutex);
-       return ret;
-}
-
 static int arm_smmu_enable_nesting(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        .probe_device           = arm_smmu_probe_device,
        .release_device         = arm_smmu_release_device,
        .device_group           = arm_smmu_device_group,
-       .domain_get_attr        = arm_smmu_domain_get_attr,
-       .domain_set_attr        = arm_smmu_domain_set_attr,
        .enable_nesting         = arm_smmu_enable_nesting,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
 
        struct mutex                    init_mutex; /* Protects smmu pointer */
 
        struct io_pgtable_ops           *pgtbl_ops;
-       bool                            non_strict;
        atomic_t                        nr_ats_masters;
 
        enum arm_smmu_domain_stage      stage;
 
                .iommu_dev      = smmu->dev,
        };
 
+       if (!iommu_get_dma_strict(domain))
+               pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
        if (smmu->impl && smmu->impl->init_context) {
                ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
                if (ret)
                        return -ENODEV;
                }
                break;
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
-                       bool non_strict = smmu_domain->pgtbl_cfg.quirks &
-                                         IO_PGTABLE_QUIRK_NON_STRICT;
-                       *(int *)data = non_strict;
-                       return 0;
-               }
-               default:
-                       return -ENODEV;
-               }
-               break;
        default:
                return -EINVAL;
        }
                        ret = -ENODEV;
                }
                break;
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       if (*(int *)data)
-                               smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-                       else
-                               smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
-                       break;
-               default:
-                       ret = -ENODEV;
-               }
-               break;
        default:
                ret = -EINVAL;
        }
 
 
        cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
        domain = cookie->fq_domain;
-       /*
-        * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
-        * implies that ops->flush_iotlb_all must be non-NULL.
-        */
+
        domain->ops->flush_iotlb_all(domain);
 }
 
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        unsigned long order, base_pfn;
        struct iova_domain *iovad;
-       int attr;
 
        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;
        init_iova_domain(iovad, 1UL << order, base_pfn);
 
        if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
-           !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
-           attr) {
+           domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
                if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
                                          iommu_dma_entry_dtor))
                        pr_warn("iova flush queue initialization failed\n");
 
 
        down_read(&dmar_global_lock);
        for_each_active_iommu(iommu, drhd) {
+               /*
+                * The flush queue implementation does not perform
+                * page-selective invalidations that are required for efficient
+                * TLB flushes in virtual environments.  The benefit of batching
+                * is likely to be much lower than the overhead of synchronizing
+                * the virtual and physical IOMMU page-tables.
+                */
+               if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
+                       pr_warn("IOMMU batching is disabled due to virtualization\n");
+                       intel_iommu_strict = 1;
+               }
                iommu_device_sysfs_add(&iommu->iommu, NULL,
                                       intel_iommu_groups,
                                       "%s", iommu->name);
        }
        up_read(&dmar_global_lock);
 
+       iommu_set_dma_strict(intel_iommu_strict);
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        if (si_domain && !hw_pass_through)
                register_memory_notifier(&intel_iommu_memory_nb);
        return ret;
 }
 
-static bool domain_use_flush_queue(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-       bool r = true;
-
-       if (intel_iommu_strict)
-               return false;
-
-       /*
-        * The flush queue implementation does not perform page-selective
-        * invalidations that are required for efficient TLB flushes in virtual
-        * environments. The benefit of batching is likely to be much lower than
-        * the overhead of synchronizing the virtual and physical IOMMU
-        * page-tables.
-        */
-       rcu_read_lock();
-       for_each_active_iommu(iommu, drhd) {
-               if (!cap_caching_mode(iommu->cap))
-                       continue;
-
-               pr_warn_once("IOMMU batching is disabled due to virtualization");
-               r = false;
-               break;
-       }
-       rcu_read_unlock();
-
-       return r;
-}
-
-static int
-intel_iommu_domain_get_attr(struct iommu_domain *domain,
-                           enum iommu_attr attr, void *data)
-{
-       switch (domain->type) {
-       case IOMMU_DOMAIN_UNMANAGED:
-               return -ENODEV;
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       *(int *)data = domain_use_flush_queue();
-                       return 0;
-               default:
-                       return -ENODEV;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-}
-
 /*
  * Check that the device does not live on an external facing PCI port that is
  * marked as untrusted. Such devices should not be able to apply quirks and
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
-       .domain_get_attr        = intel_iommu_domain_get_attr,
        .enable_nesting         = intel_iommu_enable_nesting,
        .attach_dev             = intel_iommu_attach_device,
        .detach_dev             = intel_iommu_detach_device,
 
 };
 
 #define IOMMU_CMD_LINE_DMA_API         BIT(0)
+#define IOMMU_CMD_LINE_STRICT          BIT(1)
 
 static int iommu_alloc_default_domain(struct iommu_group *group,
                                      struct device *dev);
 
 static int __init iommu_dma_setup(char *str)
 {
-       return kstrtobool(str, &iommu_dma_strict);
+       int ret = kstrtobool(str, &iommu_dma_strict);
+
+       if (!ret)
+               iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
+       return ret;
 }
 early_param("iommu.strict", iommu_dma_setup);
 
+void iommu_set_dma_strict(bool strict)
+{
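+       /*
+        * Drivers may always tighten the policy to strict; a request for lazy
+        * mode is ignored if iommu.strict was given on the command line.
+        */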
+       if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
+               iommu_dma_strict = strict;
+}
+
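+/* Report whether TLB invalidation for @domain must be synchronous. */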
+bool iommu_get_dma_strict(struct iommu_domain *domain)
+{
+       /* only allow lazy flushing for DMA domains */
+       if (domain->type == IOMMU_DOMAIN_DMA)
+               return iommu_dma_strict;
+       return true;
+}
+EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
+
 static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
 {
        group->default_domain = dom;
        if (!group->domain)
                group->domain = dom;
-
-       if (!iommu_dma_strict) {
-               int attr = 1;
-               iommu_domain_set_attr(dom,
-                                     DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
-                                     &attr);
-       }
-
        return 0;
 }
 
 
  */
 
 enum iommu_attr {
-       DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
        DOMAIN_ATTR_IO_PGTABLE_CFG,
        DOMAIN_ATTR_MAX,
 };
                                 void *data);
 int iommu_enable_nesting(struct iommu_domain *domain);
 
+void iommu_set_dma_strict(bool val);
+bool iommu_get_dma_strict(struct iommu_domain *domain);
+
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                              unsigned long iova, int flags);