.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
 };
 
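+/*
+ * No per-device IOMMU features are implemented yet, so every enable
+ * request falls through to the default case and returns -EINVAL.
+ */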
+static int amd_iommu_dev_enable_feature(struct device *dev,
+                                       enum iommu_dev_features feat)
+{
+       int ret;
+
+       switch (feat) {
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
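+/* Likewise, there are no device features that can be disabled yet. */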
+static int amd_iommu_dev_disable_feature(struct device *dev,
+                                        enum iommu_dev_features feat)
+{
+       int ret;
+
+       switch (feat) {
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
 const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
        .is_attach_deferred = amd_iommu_is_attach_deferred,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
        .def_domain_type = amd_iommu_def_domain_type,
+       .dev_enable_feat = amd_iommu_dev_enable_feature,
+       .dev_disable_feat = amd_iommu_dev_disable_feature,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = amd_iommu_attach_device,
                .map_pages      = amd_iommu_map_pages,