void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
 void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
                             struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+                             struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+                                 struct iommu_dev_data *dev_data);
 
 /* GCR3 setup */
 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
 
                if (ret)
                        return ret;
 
-               if (pdev)
+               if (pdev) {
                        pdev_enable_caps(pdev);
+
+                       /*
+                        * Device can continue to function even if IOPF
+                        * enablement failed. Hence in error path just
+                        * disable device PRI support.
+                        */
+                       if (amd_iommu_iopf_add_device(iommu, dev_data))
+                               pdev_disable_cap_pri(pdev);
+               }
        } else if (pdev) {
                pdev_enable_cap_ats(pdev);
        }
  */
 static void detach_device(struct device *dev)
 {
-       struct protection_domain *domain;
-       struct iommu_dev_data *dev_data;
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+       struct protection_domain *domain = dev_data->domain;
+       struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
        unsigned long flags;
-
-       dev_data = dev_iommu_priv_get(dev);
-       domain   = dev_data->domain;
+       bool ppr = dev_data->ppr;
 
        spin_lock_irqsave(&domain->lock, flags);
 
        if (WARN_ON(!dev_data->domain))
                goto out;
 
+       if (ppr) {
+               iopf_queue_flush_dev(dev);
+
+               /* Updated here so that it gets reflected in DTE */
+               dev_data->ppr = false;
+       }
+
        do_detach(dev_data);
 
+       /* Remove IOPF handler */
+       if (ppr)
+               amd_iommu_iopf_remove_device(iommu, dev_data);
+
        if (dev_is_pci(dev))
                pdev_disable_caps(to_pci_dev(dev));
 
 static int amd_iommu_dev_enable_feature(struct device *dev,
                                        enum iommu_dev_features feat)
 {
-       int ret;
+       int ret = 0;
 
        switch (feat) {
+       case IOMMU_DEV_FEAT_IOPF:
+               break;
        default:
                ret = -EINVAL;
                break;
 static int amd_iommu_dev_disable_feature(struct device *dev,
                                         enum iommu_dev_features feat)
 {
-       int ret;
+       int ret = 0;
 
        switch (feat) {
+       case IOMMU_DEV_FEAT_IOPF:
+               break;
        default:
                ret = -EINVAL;
                break;
 
 {
        amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
 }
+
+/*
+ * Register the device behind @dev_data on @iommu's IO page fault queue so
+ * that PRI (page request) faults from it can be serviced.
+ *
+ * Returns 0 on success or when PRI is not enabled for the device (nothing
+ * to do), -EINVAL when this IOMMU has no IOPF queue, or the error code
+ * from iopf_queue_add_device().
+ *
+ * NOTE: do not call this under iommu->lock (a raw spinlock):
+ * iopf_queue_add_device() takes a mutex and may sleep, which is forbidden
+ * in atomic context. The IOPF queue provides its own internal locking,
+ * so no extra serialization is required here.
+ */
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+                             struct iommu_dev_data *dev_data)
+{
+       int ret;
+
+       if (!dev_data->pri_enabled)
+               return 0;
+
+       /* No fault queue was set up for this IOMMU; nothing to attach to. */
+       if (!iommu->iopf_queue)
+               return -EINVAL;
+
+       ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
+       if (ret)
+               return ret;
+
+       /* Record PPR usage so it gets reflected in the DTE on attach. */
+       dev_data->ppr = true;
+       return 0;
+}
+
+/*
+ * Unregister the device behind @dev_data from @iommu's IO page fault
+ * queue and clear its PPR flag.
+ *
+ * It's assumed that the caller has verified that the device was added to
+ * the iopf queue (i.e. dev_data->ppr was set by a prior successful
+ * amd_iommu_iopf_add_device()).
+ *
+ * NOTE: do not call this under iommu->lock (a raw spinlock):
+ * iopf_queue_remove_device() takes a mutex and may sleep, which is
+ * forbidden in atomic context. The IOPF queue provides its own internal
+ * locking, so no extra serialization is required here.
+ */
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+                                 struct iommu_dev_data *dev_data)
+{
+       iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
+       dev_data->ppr = false;
+}