{
        struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
+       /* pages is only set for prime-imported objects (see
+        * vgem_prime_import_sg_table); freeing a NULL array is fine here.
+        */
+       drm_free_large(vgem_obj->pages);
+
+       /* Imported dma-buf: unmap the sg_table and drop the attachment
+        * before releasing the GEM core state.
+        */
+       if (obj->import_attach)
+               drm_prime_gem_destroy(obj, vgem_obj->table);
+
        drm_gem_object_release(obj);
        kfree(vgem_obj);
 }
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
        unsigned long vaddr = vmf->address;
-       struct page *page;
-
-       page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
-                                      (vaddr - vma->vm_start) >> PAGE_SHIFT);
-       if (!IS_ERR(page)) {
-               vmf->page = page;
-               return 0;
-       } else switch (PTR_ERR(page)) {
-               case -ENOSPC:
-               case -ENOMEM:
-                       return VM_FAULT_OOM;
-               case -EBUSY:
-                       return VM_FAULT_RETRY;
-               case -EFAULT:
-               case -EINVAL:
-                       return VM_FAULT_SIGBUS;
-               default:
-                       WARN_ON_ONCE(PTR_ERR(page));
-                       return VM_FAULT_SIGBUS;
+       int ret;
+       loff_t num_pages;
+       pgoff_t page_offset;
+       page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+
+       num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
+
+       if (page_offset > num_pages)
+               return VM_FAULT_SIGBUS;
+
+       if (obj->pages) {
+               get_page(obj->pages[page_offset]);
+               vmf->page = obj->pages[page_offset];
+               ret = 0;
+       } else {
+               struct page *page;
+
+               page = shmem_read_mapping_page(
+                                       file_inode(obj->base.filp)->i_mapping,
+                                       page_offset);
+               if (!IS_ERR(page)) {
+                       vmf->page = page;
+                       ret = 0;
+               } else switch (PTR_ERR(page)) {
+                       case -ENOSPC:
+                       case -ENOMEM:
+                               ret = VM_FAULT_OOM;
+                               break;
+                       case -EBUSY:
+                               ret = VM_FAULT_RETRY;
+                               break;
+                       case -EFAULT:
+                       case -EINVAL:
+                               ret = VM_FAULT_SIGBUS;
+                               break;
+                       default:
+                               WARN_ON(PTR_ERR(page));
+                               ret = VM_FAULT_SIGBUS;
+                               break;
+               }
+
        }
+       return ret;
 }
 
 static const struct vm_operations_struct vgem_gem_vm_ops = {
        kfree(vfile);
 }
 
-/* ioctls */
-
-static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
-                                             struct drm_file *file,
-                                             unsigned int *handle,
-                                             unsigned long size)
+/* Allocate and initialize a bare vgem GEM object of @size bytes (rounded up
+ * to a page multiple). No handle is created; callers own the sole reference.
+ * Returns ERR_PTR(-ENOMEM) or the drm_gem_object_init() error on failure.
+ */
+static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
+                                               unsigned long size)
 {
        struct drm_vgem_gem_object *obj;
        int ret;
                return ERR_PTR(-ENOMEM);
 
        ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
-       if (ret)
-               goto err_free;
+       if (ret) {
+               /* Object was never exposed, a plain kfree is sufficient. */
+               kfree(obj);
+               return ERR_PTR(ret);
+       }
+
+       return obj;
+}
+
+/* Undo __vgem_gem_create(): release GEM core state and free the object.
+ * Only for objects that never got a userspace handle.
+ */
+static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
+{
+       drm_gem_object_release(&obj->base);
+       kfree(obj);
+}
+
+/* Create a vgem object of @size bytes and publish a handle for @file.
+ * The local reference is dropped right after handle creation, so on success
+ * the handle holds the only reference. Returns ERR_PTR on failure.
+ */
+static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+                                             struct drm_file *file,
+                                             unsigned int *handle,
+                                             unsigned long size)
+{
+       struct drm_vgem_gem_object *obj;
+       int ret;
+
+       obj = __vgem_gem_create(dev, size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
 
        ret = drm_gem_handle_create(file, &obj->base, handle);
        drm_gem_object_unreference_unlocked(&obj->base);
 
        return &obj->base;
 
-err_free:
-       kfree(obj);
 err:
+       /* NOTE(review): the branch to err: is elided from this hunk. If it
+        * jumps here after the unreference above has dropped the last
+        * reference (handle creation failed), the object has already been
+        * freed via the free-object callback and __vgem_gem_destroy() would
+        * double-free it — verify against the full function.
+        */
+       __vgem_gem_destroy(obj);
        return ERR_PTR(ret);
 }
 
        return st;
 }
 
+/* Import a dma-buf, attaching it via the vgem platform device so the
+ * exporter maps the buffer for our (virtual) device.
+ */
+static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
+                                               struct dma_buf *dma_buf)
+{
+       return drm_gem_prime_import_dev(dev, dma_buf, &vgem_platform->dev);
+}
+
+/* Back a new vgem object with an imported sg_table: keep the table and
+ * expand it into a page array so the fault handler can insert pages
+ * directly. Returns ERR_PTR on allocation/init failure.
+ */
+static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
+                       struct dma_buf_attachment *attach, struct sg_table *sg)
+{
+       struct drm_vgem_gem_object *obj;
+       int npages;
+
+       obj = __vgem_gem_create(dev, attach->dmabuf->size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+
+       obj->table = sg;
+       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (!obj->pages) {
+               /* obj->pages is NULL, so destroy only tears down GEM state. */
+               __vgem_gem_destroy(obj);
+               return ERR_PTR(-ENOMEM);
+       }
+       /* Flatten the scatterlist into per-page pointers (no dma addrs). */
+       drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
+                                       npages);
+       return &obj->base;
+}
+
 static void *vgem_prime_vmap(struct drm_gem_object *obj)
 {
        long n_pages = obj->size >> PAGE_SHIFT;
        .dumb_map_offset                = vgem_gem_dumb_map,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_pin = vgem_prime_pin,
+       .gem_prime_import = vgem_prime_import,
        .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_import_sg_table = vgem_prime_import_sg_table,
        .gem_prime_get_sg_table = vgem_prime_get_sg_table,
        .gem_prime_vmap = vgem_prime_vmap,
        .gem_prime_vunmap = vgem_prime_vunmap,