.flags          = cpu_to_le32(flags),
                };
 
-               ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
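+               /* Queue the MAP request; viommu_iotlb_sync_map() sends the batch */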
+               ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
                if (ret) {
                        viommu_del_mappings(vdomain, iova, end);
                        return ret;
        viommu_sync_req(vdomain->viommu);
 }
 
+static int viommu_iotlb_sync_map(struct iommu_domain *domain,
+                                unsigned long iova, size_t size)
+{
+       struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+       /*
+        * May be called before the viommu is initialized,
+        * e.g. while creating direct mappings.
+        */
+       if (!vdomain->nr_endpoints)
+               return 0;
+       return viommu_sync_req(vdomain->viommu);
+}
+
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
        struct iommu_resv_region *entry, *new_entry, *msi = NULL;
                .unmap_pages            = viommu_unmap_pages,
                .iova_to_phys           = viommu_iova_to_phys,
                .iotlb_sync             = viommu_iotlb_sync,
+               .iotlb_sync_map         = viommu_iotlb_sync_map,
                .free                   = viommu_domain_free,
        }
 };
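
For illustration, a minimal sketch of how the IOMMU core is expected to
drive these two callbacks after this change: map_pages() now only queues
VIRTIO_IOMMU_T_MAP requests via viommu_add_req(), and the core's follow-up
iotlb_sync_map() call kicks the virtqueue once for the whole batch. The
function below (example_map() is a hypothetical name) is a simplified
assumption modelled on iommu_map(), not the actual core code:

static int example_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot)
{
	/* Assumed, simplified flow; see iommu_map() in drivers/iommu/iommu.c */
	const struct iommu_domain_ops *ops = domain->ops;
	size_t mapped = 0;
	int ret;

	/* Queues the MAP request(s) without kicking the device */
	ret = ops->map_pages(domain, iova, paddr, size, 1, prot,
			     GFP_KERNEL, &mapped);
	if (ret)
		return ret;

	/* One kick + wait for the whole accumulated batch */
	return ops->iotlb_sync_map(domain, iova, size);
}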