return ret;
 }
 
+/*
+ * Ask the IOMMU driver whether attaching @dev to a domain must be
+ * deferred until a device driver takes ownership of the device.
+ * Drivers that do not implement ->is_attach_deferred never defer.
+ */
+static bool iommu_is_attach_deferred(struct device *dev)
+{
+       const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+       if (ops->is_attach_deferred)
+               return ops->is_attach_deferred(dev);
+
+       return false;
+}
+
+/*
+ * Perform the first attach of @dev to the domain passed in @data.
+ * Must be called with the device's group mutex held (asserted below).
+ *
+ * If the driver requests deferred attach, only record that fact in
+ * dev->iommu->attach_deferred and report success; the actual attach
+ * is then done later through iommu_deferred_attach().
+ */
+static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
+{
+       struct iommu_domain *domain = data;
+
+       lockdep_assert_held(&dev->iommu_group->mutex);
+
+       if (iommu_is_attach_deferred(dev)) {
+               dev->iommu->attach_deferred = 1;
+               return 0;
+       }
+
+       return __iommu_attach_device(domain, dev);
+}
+
 int iommu_probe_device(struct device *dev)
 {
        const struct iommu_ops *ops;
         * attach the default domain.
         */
        if (group->default_domain && !group->owner) {
-               ret = __iommu_attach_device(group->default_domain, dev);
+               ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
                if (ret) {
                        mutex_unlock(&group->mutex);
                        iommu_group_put(group);
        return ret;
 }
 
-static bool iommu_is_attach_deferred(struct device *dev)
-{
-       const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-       if (ops->is_attach_deferred)
-               return ops->is_attach_deferred(dev);
-
-       return false;
-}
-
 /**
  * iommu_group_add_device - add a device to an iommu group
  * @group: the group into which to add the device (reference should be held)
 
        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
-       if (group->domain  && !iommu_is_attach_deferred(dev))
-               ret = __iommu_attach_device(group->domain, dev);
+       if (group->domain)
+               ret = iommu_group_do_dma_first_attach(dev, group->domain);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;
 
 }
 
-static int iommu_group_do_dma_attach(struct device *dev, void *data)
-{
-       struct iommu_domain *domain = data;
-       int ret = 0;
-
-       if (!iommu_is_attach_deferred(dev))
-               ret = __iommu_attach_device(domain, dev);
-
-       return ret;
-}
-
-static int __iommu_group_dma_attach(struct iommu_group *group)
+static int __iommu_group_dma_first_attach(struct iommu_group *group)
 {
        return __iommu_group_for_each_dev(group, group->default_domain,
-                                         iommu_group_do_dma_attach);
+                                         iommu_group_do_dma_first_attach);
 }
 
 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
 
                iommu_group_create_direct_mappings(group);
 
-               ret = __iommu_group_dma_attach(group);
+               ret = __iommu_group_dma_first_attach(group);
 
                mutex_unlock(&group->mutex);
 
                return -ENODEV;
 
        ret = domain->ops->attach_dev(domain, dev);
-       if (!ret)
-               trace_attach_device_to_domain(dev);
-       return ret;
+       if (ret)
+               return ret;
+       dev->iommu->attach_deferred = 0;
+       trace_attach_device_to_domain(dev);
+       return 0;
 }
 
 /**
 
 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 {
-       if (iommu_is_attach_deferred(dev))
+       if (dev->iommu && dev->iommu->attach_deferred)
                return __iommu_attach_device(domain, dev);
 
        return 0;
 static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
 {
-       if (iommu_is_attach_deferred(dev))
-               return;
-
        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
 }