case IOMMU_CAP_CACHE_COHERENCY:
                /* Assume that a coherent TCU implies coherent TBUs */
                return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
+       case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
+               return arm_smmu_master_canwbs(master);
        case IOMMU_CAP_NOEXEC:
        case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        default:
                return false;
        }
 }
 
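+/*
+ * Succeed only if every master currently attached to the domain can force
+ * coherent (write-back snooping) DMA, and latch the result so the attach
+ * path can refuse any later attach of a non-CANWBS master.
+ */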
+static bool arm_smmu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_master_domain *master_domain;
+       unsigned long flags;
+       bool ret = true;
+
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_for_each_entry(master_domain, &smmu_domain->devices,
+                           devices_elm) {
+               if (!arm_smmu_master_canwbs(master_domain->master)) {
+                       ret = false;
+                       break;
+               }
+       }
+       smmu_domain->enforce_cache_coherency = ret;
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+       return ret;
+}
+
 struct arm_smmu_domain *arm_smmu_domain_alloc(void)
 {
        struct arm_smmu_domain *smmu_domain;
                 * one of them.
                 */
                spin_lock_irqsave(&smmu_domain->devices_lock, flags);
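+               /*
+                * Once coherency enforcement has been promised, refuse to
+                * add a master that cannot guarantee it.
+                */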
+               if (smmu_domain->enforce_cache_coherency &&
+                   !arm_smmu_master_canwbs(master)) {
+                       spin_unlock_irqrestore(&smmu_domain->devices_lock,
+                                              flags);
+                       kfree(master_domain);
+                       return -EINVAL;
+               }
+
                if (state->ats_enabled)
                        atomic_inc(&smmu_domain->nr_ats_masters);
                list_add(&master_domain->devices_elm, &smmu_domain->devices);
        .owner                  = THIS_MODULE,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev             = arm_smmu_attach_dev,
+               .enforce_cache_coherency = arm_smmu_enforce_cache_coherency,
                .set_dev_pasid          = arm_smmu_s1_set_dev_pasid,
                .map_pages              = arm_smmu_map_pages,
                .unmap_pages            = arm_smmu_unmap_pages,
 
        /* List of struct arm_smmu_master_domain */
        struct list_head                devices;
        spinlock_t                      devices_lock;
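+       /* Set by arm_smmu_enforce_cache_coherency(); new masters must be CANWBS */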
+       bool                            enforce_cache_coherency : 1;
 
        struct mmu_notifier             mmu_notifier;
 };
 int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
                       struct arm_smmu_cmdq *cmdq);
 
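+/*
+ * True when firmware reports that the master's PCI root complex always
+ * performs coherent (write-back snooping) DMA, so the device cannot make
+ * its accesses non-coherent (e.g. via PCIe No_snoop).
+ */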
+static inline bool arm_smmu_master_canwbs(struct arm_smmu_master *master)
+{
+       return dev_iommu_fwspec_get(master->dev)->flags &
+              IOMMU_FWSPEC_PCI_RC_CANWBS;
+}
+
 #ifdef CONFIG_ARM_SMMU_V3_SVA
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
 bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);