+                       break;
+               }
+       }
+       WARN_ON_ONCE(!dev_pasid);
+       spin_unlock_irqrestore(&dmar_domain->lock, flags);
  
+       domain_detach_iommu(dmar_domain, iommu);
+       kfree(dev_pasid);
+ out_tear_down:
        intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+       intel_drain_pasid_prq(dev, pasid);
+ }
+ 
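For reference, the list handling above leans on a small per-{device, PASID}
bookkeeping record introduced by this series; a minimal sketch matching the
fields as they are used here (the authoritative definition lives in the
driver's private header) would be:

	struct dev_pasid_info {
		struct list_head link_domain;	/* on dmar_domain->dev_pasids,
						 * protected by dmar_domain->lock */
		struct device *dev;
		ioasid_t pasid;
	};
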
+ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+                                    struct device *dev, ioasid_t pasid)
+ {
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct intel_iommu *iommu = info->iommu;
+       struct dev_pasid_info *dev_pasid;
+       unsigned long flags;
+       int ret;
+ 
+       if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+               return -EOPNOTSUPP;
+ 
+       if (context_copied(iommu, info->bus, info->devfn))
+               return -EBUSY;
+ 
+       ret = prepare_domain_attach_device(domain, dev);
+       if (ret)
+               return ret;
+ 
+       dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+       if (!dev_pasid)
+               return -ENOMEM;
+ 
+       ret = domain_attach_iommu(dmar_domain, iommu);
+       if (ret)
+               goto out_free;
+ 
+       if (domain_type_is_si(dmar_domain))
+               ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
+                                                    dev, pasid);
+       else if (dmar_domain->use_first_level)
+               ret = domain_setup_first_level(iommu, dmar_domain,
+                                              dev, pasid);
+       else
+               ret = intel_pasid_setup_second_level(iommu, dmar_domain,
+                                                    dev, pasid);
+       if (ret)
+               goto out_detach_iommu;
+ 
+       dev_pasid->dev = dev;
+       dev_pasid->pasid = pasid;
+       spin_lock_irqsave(&dmar_domain->lock, flags);
+       list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
+       spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ 
+       return 0;
+ out_detach_iommu:
+       domain_detach_iommu(dmar_domain, iommu);
+ out_free:
+       kfree(dev_pasid);
+       return ret;
  }
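
Both halves are driven by the iommu core: iommu_attach_device_pasid() ends up
in set_dev_pasid() above, and iommu_detach_device_pasid() in
intel_iommu_remove_dev_pasid(). A hedged caller-side sketch, assuming the
caller has already allocated @pasid and an unmanaged @domain for @dev:

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)	/* e.g. -EOPNOTSUPP or -EBUSY from the checks above */
		return ret;

	/* ... issue DMA tagged with @pasid, translated through @domain ... */

	iommu_detach_device_pasid(domain, dev, pasid);

Note that the error unwind mirrors the setup order: the reference taken by
domain_attach_iommu() is dropped before the dev_pasid allocation is freed,
and nothing is published on dmar_domain->dev_pasids until the PASID table
entry has been set up successfully.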
  
+ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
+ {
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct intel_iommu *iommu = info->iommu;
+       struct iommu_hw_info_vtd *vtd;
+ 
+       vtd = kzalloc(sizeof(*vtd), GFP_KERNEL);
+       if (!vtd)
+               return ERR_PTR(-ENOMEM);
+ 
+       vtd->cap_reg = iommu->cap;
+       vtd->ecap_reg = iommu->ecap;
+       *length = sizeof(*vtd);
+       *type = IOMMU_HW_INFO_TYPE_INTEL_VTD;
+       return vtd;
+ }
+ 
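This callback feeds the iommufd IOMMU_GET_HW_INFO ioctl; the core copies the
returned buffer to userspace and then kfree()s it, which is why a plain
kzalloc() suffices here. A hedged userspace sketch, assuming an iommufd file
descriptor and a bound device ID (struct and ioctl names per the iommufd
uAPI):

	struct iommu_hw_info_vtd vtd = {};
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.data_len = sizeof(vtd),
		.data_uptr = (uintptr_t)&vtd,
	};

	if (!ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd) &&
	    cmd.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD)
		printf("cap 0x%llx ecap 0x%llx\n",
		       (unsigned long long)vtd.cap_reg,
		       (unsigned long long)vtd.ecap_reg);
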
  const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
+       .hw_info                = intel_iommu_hw_info,
        .domain_alloc           = intel_iommu_domain_alloc,
        .probe_device           = intel_iommu_probe_device,
        .probe_finalize         = intel_iommu_probe_finalize,