spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+/*
+ * domain_flush_pasid_iotlb - Invalidate PASID-tagged IOTLB entries for
+ * a domain using the no-PASID (RID2PASID) tag.
+ *
+ * @iommu:  IOMMU unit to issue the invalidation on
+ * @domain: domain whose translations are being invalidated; its domain id
+ *          on @iommu is looked up via domain_id_iommu()
+ * @addr:   start address of the range to invalidate
+ * @npages: number of pages to invalidate (callers pass -1 with @addr == 0
+ *          for a whole-domain flush)
+ * @ih:     invalidation hint forwarded to the PIOTLB descriptor
+ *
+ * The flush is issued under domain->lock so it is serialized against
+ * other lock holders; NOTE(review): presumably this protects per-domain
+ * state consulted by qi_flush_piotlb() — confirm against its definition.
+ */
+static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
+                                    struct dmar_domain *domain, u64 addr,
+                                    unsigned long npages, bool ih)
+{
+       u16 did = domain_id_iommu(domain, iommu);
+       unsigned long flags;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                  struct dmar_domain *domain,
                                  unsigned long pfn, unsigned int pages,
                ih = 1 << 6;
 
        if (domain->use_first_level) {
-               qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, pages, ih);
+               domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
        } else {
                unsigned long bitmask = aligned_pages - 1;
 
                u16 did = domain_id_iommu(dmar_domain, iommu);
 
                if (dmar_domain->use_first_level)
-                       qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, 0, -1, 0);
+                       domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
                else
                        iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                                 DMA_TLB_DSI_FLUSH);