return -ENOMEM;
                }
 
-               buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+               buf->cookie = dma_alloc_attrs(dev->dev,
                                        buf->size,
                                        &buf->dma_addr, GFP_KERNEL,
                                        &buf->dma_attrs);
-               if (!buf->kvaddr) {
+               if (!buf->cookie) {
                        DRM_ERROR("failed to allocate buffer.\n");
                        ret = -ENOMEM;
                        goto err_free;
        buf->sgt = NULL;
 
        if (!is_drm_iommu_supported(dev)) {
-               dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+               dma_free_attrs(dev->dev, buf->size, buf->cookie,
                                (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
                drm_free_large(buf->pages);
        } else
 
                                     struct drm_framebuffer *fb)
 {
        struct fb_info *fbi = helper->fbdev;
-       struct drm_device *dev = helper->dev;
        struct exynos_drm_gem_buf *buffer;
        unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
+       unsigned int nr_pages;
        unsigned long offset;
 
        drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
                return -EFAULT;
        }
 
-       /* map pages with kernel virtual space. */
+       nr_pages = buffer->size >> PAGE_SHIFT;
+
+       buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+                       nr_pages, VM_MAP,
+                       pgprot_writecombine(PAGE_KERNEL));
        if (!buffer->kvaddr) {
-               if (is_drm_iommu_supported(dev)) {
-                       unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
-
-                       buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
-                                       nr_pages, VM_MAP,
-                                       pgprot_writecombine(PAGE_KERNEL));
-               } else {
-                       phys_addr_t dma_addr = buffer->dma_addr;
-                       if (dma_addr)
-                               buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
-                       else
-                               buffer->kvaddr = (void __iomem *)NULL;
-               }
-               if (!buffer->kvaddr) {
-                       DRM_ERROR("failed to map pages to kernel space.\n");
-                       return -EIO;
-               }
+               DRM_ERROR("failed to map pages to kernel space.\n");
+               return -EIO;
        }
 
        /* buffer count to framebuffer always is 1 at booting time. */
        struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
        struct drm_framebuffer *fb;
 
-       if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
+       if (exynos_gem_obj->buffer->kvaddr)
                vunmap(exynos_gem_obj->buffer->kvaddr);
 
        /* release drm framebuffer and real buffer */
 
 /*
  * exynos drm gem buffer structure.
  *
+ * @cookie: cookie returned by dma_alloc_attrs
  * @kvaddr: kernel virtual address to allocated memory region.
  * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *     VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
+       void                    *cookie;
        void __iomem            *kvaddr;
        unsigned long           userptr;
        dma_addr_t              dma_addr;