 	spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
 
+/* domain->qi_batch will be freed in iommu_free_domain() path. */
+static int domain_qi_batch_alloc(struct dmar_domain *domain)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&domain->cache_lock, flags);
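+	/* Serialized by cache_lock; only the first caller allocates. */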
+	if (domain->qi_batch)
+		goto out_unlock;
+
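+	/* cache_lock is held with interrupts off, hence GFP_ATOMIC. */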
+	domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC);
+	if (!domain->qi_batch)
+		ret = -ENOMEM;
+out_unlock:
+	spin_unlock_irqrestore(&domain->cache_lock, flags);
+
+	return ret;
+}
+
 static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
 				     struct device *dev, ioasid_t pasid)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	int ret;
 
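+	/* Allocate qi_batch before the first cache tag is assigned. */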
+	ret = domain_qi_batch_alloc(domain);
+	if (ret)
+		return ret;
+
 	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
 	if (ret || !info->ats_enabled)
 		return ret;
@@ ... @@ static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	int ret;
 
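+	/*
+	 * The parent of a nested domain is invalidated through this path,
+	 * so it needs a qi_batch of its own.
+	 */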
+	ret = domain_qi_batch_alloc(domain);
+	if (ret)
+		return ret;
+
 	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
 	if (ret || !info->ats_enabled)
 		return ret;
@@ ... @@ int cache_tag_assign_domain(struct dmar_domain *domain,
 	u16 did = domain_get_id_for_dev(domain, dev);
 	int ret;
 
-	/* domain->qi_bach will be freed in iommu_free_domain() path. */
-	if (!domain->qi_batch) {
-		domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL);
-		if (!domain->qi_batch)
-			return -ENOMEM;
-	}
-
 	ret = __cache_tag_assign_domain(domain, did, dev, pasid);
 	if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
 		return ret;