return 0;
 }
 
+/*
+ * update_vm_cache_attr - choose the vma page protection from the GEM
+ * object's memory-type flags.
+ *
+ * Cacheable if EXYNOS_BO_CACHABLE is set, write-combined if EXYNOS_BO_WC
+ * is set, non-cacheable otherwise. CACHABLE takes precedence over WC when
+ * both bits are set — presumably that combination is rejected earlier by
+ * check_gem_flags(); confirm against the flag-validation path.
+ */
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+                                       struct vm_area_struct *vma)
+{
+       DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+       /* non-cacheable mapping is the default. */
+       if (obj->flags & EXYNOS_BO_CACHABLE)
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       else if (obj->flags & EXYNOS_BO_WC)
+               vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+       else
+               vma->vm_page_prot =
+                       pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
        if (!IS_NONCONTIG_BUFFER(flags)) {
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
        struct drm_gem_object *obj;
+       struct exynos_drm_gem_buf *buf;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (!exynos_gem_obj)
-               return;
-
        obj = &exynos_gem_obj->base;
+       buf = exynos_gem_obj->buffer;
 
        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-       if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
-                       exynos_gem_obj->buffer->pages)
+       /*
+        * NOTE(review): the NULL check on exynos_gem_obj was removed above,
+        * so every caller must now guarantee a valid object — verify all
+        * call sites. This early return also skips exynos_drm_fini_buf()
+        * below; presumably a buffer without pages was never fully set up,
+        * but confirm the buffer struct itself cannot leak here.
+        */
+       if (!buf->pages)
+               return;
+
+       /* non-contiguous objects drop their page array; contiguous ones
+        * free the underlying allocation. */
+       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
                exynos_drm_gem_put_pages(obj);
        else
-               exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
-                                       exynos_gem_obj->buffer);
+               exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
 
-       exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+       exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;
 
        if (obj->map_list.map)
 
        vma->vm_flags |= (VM_IO | VM_RESERVED);
 
-       /* in case of direct mapping, always having non-cachable attribute */
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       update_vm_cache_attr(exynos_gem_obj, vma);
 
        vm_size = usize = vma->vm_end - vma->vm_start;
 
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct drm_gem_object *obj;
        int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
                return ret;
        }
 
+       /* presumably set by drm_gem_mmap() in the elided code above —
+        * confirm vm_private_data holds the GEM object at this point. */
+       obj = vma->vm_private_data;
+       exynos_gem_obj = to_exynos_gem_obj(obj);
+
+       /*
+        * Reject objects carrying invalid memory-type flags, unwinding the
+        * vma/offset state set up by drm_gem_mmap() before failing.
+        */
+       ret = check_gem_flags(exynos_gem_obj->flags);
+       if (ret) {
+               drm_gem_vm_close(vma);
+               drm_gem_free_mmap_offset(obj);
+               return ret;
+       }
+
+       /* fault in struct-page-backed (mixed) mappings, not raw PFNs. */
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
 
+       /* apply the caching attribute requested at allocation time. */
+       update_vm_cache_attr(exynos_gem_obj, vma);
+
        return ret;
 }
 
 
 /* memory type definitions. */
 enum e_drm_exynos_gem_mem_type {
+       /* physically contiguous memory; the default when no bit is set. */
+       EXYNOS_BO_CONTIG        = 0 << 0,
        /* Physically Non-Continuous memory. */
        EXYNOS_BO_NONCONTIG     = 1 << 0,
-       EXYNOS_BO_MASK          = EXYNOS_BO_NONCONTIG
+       /* non-cacheable mapping; the default when no cache bit is set. */
+       EXYNOS_BO_NONCACHABLE   = 0 << 1,
+       /* cacheable mapping. */
+       EXYNOS_BO_CACHABLE      = 1 << 1,
+       /* write-combined mapping. */
+       EXYNOS_BO_WC            = 1 << 2,
+       /*
+        * NOTE(review): CACHABLE and WC are independent bits and both pass
+        * this mask — presumably check_gem_flags() forbids setting both at
+        * once; confirm, since update_vm_cache_attr() silently prefers
+        * CACHABLE.
+        */
+       EXYNOS_BO_MASK          = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
+                                       EXYNOS_BO_WC
 };
 
 #define DRM_EXYNOS_GEM_CREATE          0x00