struct dmar_domain *domain;
        struct intel_iommu *iommu;
        struct qi_desc desc[3];
-       struct pci_dev *pdev;
        int head, tail;
        u16 sid, did;
-       int qdep;
 
        info = dev_iommu_priv_get(dev);
-       if (WARN_ON(!info || !dev_is_pci(dev)))
-               return;
-
        if (!info->pri_enabled)
                return;
 
        iommu = info->iommu;
        domain = info->domain;
-       pdev = to_pci_dev(dev);
        sid = PCI_DEVID(info->bus, info->devfn);
        did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;
 
-       qdep = pci_ats_queue_depth(pdev);
-
        /*
         * Check and wait until all pending page requests in the queue are
         * handled by the prq handling thread.
         */
        desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_FENCE |
                        QI_IWD_TYPE;
-       desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
-                       QI_EIOTLB_DID(did) |
-                       QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-                       QI_EIOTLB_TYPE;
-       desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
-                       QI_DEV_EIOTLB_SID(sid) |
-                       QI_DEV_EIOTLB_QDEP(qdep) |
-                       QI_DEIOTLB_TYPE |
-                       QI_DEV_IOTLB_PFSID(info->pfsid);
+       if (pasid == IOMMU_NO_PASID) {
+               qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]);
+               qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
+                                 MAX_AGAW_PFN_WIDTH, &desc[2]);
+       } else {
+               qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
+               qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
+                                       0, MAX_AGAW_PFN_WIDTH, &desc[2]);
+       }
 qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);