 exynos_atomic_commit_complete(commit);
 }
 
+static struct device *exynos_drm_get_dma_device(void);
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
        dev_set_drvdata(dev->dev, dev);
        dev->dev_private = (void *)private;
 
+       /* the first real CRTC device is used for all DMA mapping operations */
+       private->dma_dev = exynos_drm_get_dma_device();
+       if (!private->dma_dev) {
+               DRM_ERROR("no device found for DMA mapping operations.\n");
+               ret = -ENODEV;
+               goto err_free_private;
+       }
+       DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+                dev_name(private->dma_dev));
+
        /*
         * create mapping to manage iommu table and set a pointer to iommu
         * mapping structure to iommu_mapping of private data.
 
 #define DRM_COMPONENT_DRIVER   BIT(0)  /* supports component framework */
 #define DRM_VIRTUAL_DEVICE     BIT(1)  /* create virtual platform device */
+#define DRM_DMA_DEVICE         BIT(2)  /* can be used for dma allocations */
 
 #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
 
 static struct exynos_drm_driver_info exynos_drm_drivers[] = {
        {
                DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
-               DRM_COMPONENT_DRIVER
+               DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
        }, {
                DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
-               DRM_COMPONENT_DRIVER
+               DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
        }, {
                DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
-               DRM_COMPONENT_DRIVER
+               DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
        }, {
                DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
-               DRM_COMPONENT_DRIVER
+               DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
        }, {
                DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
                DRM_COMPONENT_DRIVER
        },
 };
 
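+/*
+ * Pick the device to be used for all DMA mapping operations: the first
+ * bound CRTC device whose driver is marked DRM_DMA_DEVICE in the table
+ * above.
+ */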
+static struct device *exynos_drm_get_dma_device(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+               struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+               struct device *dev;
+
+               if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+                       continue;
+
+               while ((dev = bus_find_device(&platform_bus_type, NULL,
+                                           &info->driver->driver,
+                                           (void *)platform_bus_type.match))) {
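+                       /*
+                        * Drop the reference taken by bus_find_device(): only
+                        * a bare pointer is kept, on the assumption that the
+                        * bound CRTC device outlives the DRM device.
+                        */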
+                       put_device(dev);
+                       return dev;
+               }
+       }
+       return NULL;
+}
+
 static void exynos_drm_unregister_devices(void)
 {
        int i;
 
        struct drm_crtc *crtc[MAX_CRTC];
        struct drm_property *plane_zpos_property;
 
+       struct device *dma_dev;
        unsigned long da_start;
        unsigned long da_space_size;
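+       /* arm_iommu mapping; void * keeps this header independent of IOMMU support */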
+       void *mapping;
 
        unsigned int pipe;
 
        wait_queue_head_t       wait;
 };
 
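+/*
+ * Return the device selected for DMA mapping operations; all dma_*()
+ * calls must use this instead of the top-level DRM device, which may
+ * be virtual and carry no DMA configuration.
+ */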
+static inline struct device *to_dma_dev(struct drm_device *dev)
+{
+       struct exynos_drm_private *priv = dev->dev_private;
+
+       return priv->dma_dev;
+}
+
 /*
  * Exynos drm sub driver structure.
  *
 
        if (vm_size > exynos_gem->size)
                return -EINVAL;
 
-       ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
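+       /* mmap through the device that performed the original allocation */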
+       ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
                             exynos_gem->dma_addr, exynos_gem->size,
                             &exynos_gem->dma_attrs);
        if (ret < 0) {
 
        init_dma_attrs(&g2d->cmdlist_dma_attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
 
-       g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
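+       /* allocate the pool through the common DMA device, not drm_dev->dev */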
+       g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
                                                G2D_CMDLIST_POOL_SIZE,
                                                &g2d->cmdlist_pool, GFP_KERNEL,
                                                &g2d->cmdlist_dma_attrs);
        return 0;
 
 err:
-       dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+       dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
                        g2d->cmdlist_pool_virt,
                        g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
        return ret;
        kfree(g2d->cmdlist_node);
 
        if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
-               dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+               dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+                               G2D_CMDLIST_POOL_SIZE,
                                g2d->cmdlist_pool_virt,
                                g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
        }
 
                return -ENOMEM;
        }
 
-       exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
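+       /* allocate through the DMA device selected at load time */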
+       exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
                                             &exynos_gem->dma_attrs);
        if (!exynos_gem->cookie) {
                goto err_free;
        }
 
-       ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
+       ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
                                    exynos_gem->dma_addr, exynos_gem->size,
                                    &exynos_gem->dma_attrs);
        if (ret < 0) {
 err_sgt_free:
        sg_free_table(&sgt);
 err_dma_free:
-       dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+       dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
 err_free:
        drm_free_large(exynos_gem->pages);
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
 
-       dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+       dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
                        &exynos_gem->dma_attrs);
 
        if (vm_size > exynos_gem->size)
                return -EINVAL;
 
-       ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
+       ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
                             exynos_gem->dma_addr, exynos_gem->size,
                             &exynos_gem->dma_attrs);
        if (ret < 0) {
 
        mutex_lock(&drm_dev->struct_mutex);
 
-       nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
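+       /* map through the common DMA device (and its IOMMU mapping, if any) */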
+       nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
 {
-       dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+       dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
 }
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 
 {
        struct dma_iommu_mapping *mapping = NULL;
        struct exynos_drm_private *priv = drm_dev->dev_private;
-       struct device *dev = drm_dev->dev;
 
        if (!priv->da_start)
                priv->da_start = EXYNOS_DEV_ADDR_START;
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);
 
-       dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
-                                       GFP_KERNEL);
-       if (!dev->dma_parms)
-               goto error;
-
-       dma_set_max_seg_size(dev, 0xffffffffu);
-       dev->archdata.mapping = mapping;
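+       /* store the mapping in driver-private data instead of dev->archdata */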
+       priv->mapping = mapping;
 
        return 0;
-error:
-       arm_iommu_release_mapping(mapping);
-       return -ENOMEM;
 }
 
 /*
  */
 void drm_release_iommu_mapping(struct drm_device *drm_dev)
 {
-       struct device *dev = drm_dev->dev;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       arm_iommu_release_mapping(dev->archdata.mapping);
+       arm_iommu_release_mapping(priv->mapping);
 }
 
 /*
 int drm_iommu_attach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev)
 {
-       struct device *dev = drm_dev->dev;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
        int ret;
 
-       if (!dev->archdata.mapping)
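+       /* nothing to attach to when no common IOMMU mapping was created */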
+       if (!priv->mapping)
                return 0;
 
        subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
        if (subdrv_dev->archdata.mapping)
                arm_iommu_detach_device(subdrv_dev);
 
-       ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+       ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
        if (ret < 0) {
                DRM_DEBUG_KMS("failed iommu attach.\n");
                return ret;
        }
 
-       /*
-        * Set dma_ops to drm_device just one time.
-        *
-        * The dma mapping api needs device object and the api is used
-        * to allocate physial memory and map it with iommu table.
-        * If iommu attach succeeded, the sub driver would have dma_ops
-        * for iommu and also all sub drivers have same dma_ops.
-        */
-       if (get_dma_ops(dev) == get_dma_ops(NULL))
-               set_dma_ops(dev, get_dma_ops(subdrv_dev));
-
        return 0;
 }
 
 void drm_iommu_detach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev)
 {
-       struct device *dev = drm_dev->dev;
-       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+       struct dma_iommu_mapping *mapping = priv->mapping;
 
        if (!mapping || !mapping->domain)
                return;
 
 
 static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 {
-       struct device *dev = drm_dev->dev;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       return dev->archdata.mapping ? true : false;
+       return priv->mapping ? true : false;
 }
 
 #else