/* GPUVM API */
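+/* For the GPUVM functions below, the VM is identified by the DRM file
+ * private data (drm_priv) of the process's render node, passed as an
+ * opaque pointer, rather than by a raw amdgpu_vm pointer.
+ */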
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                        struct file *filp, u32 pasid,
-                                       void **vm, void **process_info,
+                                       void **process_info,
                                        struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
-uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
-               void *vm, struct kgd_mem **mem,
+               void *drm_priv, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags);
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
                struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
                struct kgd_mem *mem, void **kptr, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
                                              struct kfd_vm_fault_info *info);
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
                                      struct dma_buf *dmabuf,
-                                     uint64_t va, void *vm,
+                                     uint64_t va, void *drm_priv,
                                      struct kgd_mem **mem, uint64_t *size,
                                      uint64_t *mmap_offset);
 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 
        return 0;
 }
 
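+/* Recover the amdgpu VM embedded in the driver_priv of a DRM file.
+ * Callers pass the struct drm_file pointer around as an opaque void *.
+ */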
+static struct amdgpu_vm *drm_priv_to_vm(struct drm_file *drm_priv)
+{
+       struct amdgpu_fpriv *fpriv = drm_priv->driver_priv;
+
+       return &fpriv->vm;
+}
+
 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                       struct dma_fence **ef)
 {
 
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                           struct file *filp, u32 pasid,
-                                          void **vm, void **process_info,
+                                          void **process_info,
                                           struct dma_fence **ef)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct drm_file *drm_priv = filp->private_data;
-       struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
-       struct amdgpu_vm *avm = &drv_priv->vm;
+       struct amdgpu_fpriv *drv_priv;
+       struct amdgpu_vm *avm;
        int ret;
 
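+       /* Fails if filp is not an amdgpu DRM file */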
+       ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+       if (ret)
+               return ret;
+       avm = &drv_priv->vm;
+
        /* Already a compute VM? */
        if (avm->process_info)
                return -EINVAL;

        /* Convert VM into a compute VM */
        ret = amdgpu_vm_make_compute(adev, avm, pasid);
        if (ret)
                return ret;

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
        if (ret)
                return ret;
 
-       *vm = (void *)avm;
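+       /* Record the current task in the VM so later GPU page faults can
+        * be attributed to this process
+        */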
+       amdgpu_vm_set_task_info(avm);
 
        return 0;
 }
        }
 }
 
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm;
 
-       if (WARN_ON(!kgd || !vm))
+       if (WARN_ON(!kgd || !drm_priv))
                return;
 
-       pr_debug("Releasing process vm %p\n", vm);
+       avm = drm_priv_to_vm(drm_priv);
+
+       pr_debug("Releasing process vm %p\n", avm);
 
        /* The original PASID of the amdgpu VM was already released when
         * the VM was converted to a compute VM. The current PASID is
         * managed by KFD and will be released on KFD process destroy.
         */
        amdgpu_vm_release_compute(adev, avm);
 }
 
-uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
 {
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        struct amdgpu_bo *pd = avm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 
 
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
-               void *vm, struct kgd_mem **mem,
+               void *drm_priv, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        enum ttm_bo_type bo_type = ttm_bo_type_device;
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        int ret;
        struct amdgpu_bo *bo;
        uint32_t domain;
        pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
                        mem->va,
                        mem->va + bo_size * (1 + mem->aql_queue),
-                       vm, domain_string(domain));
+                       avm, domain_string(domain));
 
-       ret = reserve_bo_and_vm(mem, vm, &ctx);
+       ret = reserve_bo_and_vm(mem, avm, &ctx);
        if (unlikely(ret))
                goto out;
 
        }
 
        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-               if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
+               if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
                        pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
                                        entry->va, entry->va + bo_size,
                                        entry);
                                goto map_bo_to_gpuvm_failed;
                        }
 
-                       ret = vm_update_pds(vm, ctx.sync);
+                       ret = vm_update_pds(avm, ctx.sync);
                        if (ret) {
                                pr_err("Failed to update page directories\n");
                                goto map_bo_to_gpuvm_failed;
 }
 
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
-       struct amdkfd_process_info *process_info =
-               ((struct amdgpu_vm *)vm)->process_info;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+       struct amdkfd_process_info *process_info = avm->process_info;
        unsigned long bo_size = mem->bo->tbo.base.size;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
 
        mutex_lock(&mem->lock);
 
-       ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
+       ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
        if (unlikely(ret))
                goto out;
        /* If no VMs were reserved, it means the BO wasn't actually mapped */
                goto unreserve_out;
        }
 
-       ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
+       ret = vm_validate_pt_pd_bos(avm);
        if (unlikely(ret))
                goto unreserve_out;
 
        pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
                mem->va,
                mem->va + bo_size * (1 + mem->aql_queue),
-               vm);
+               avm);
 
        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-               if (entry->bo_va->base.vm == vm && entry->is_mapped) {
+               if (entry->bo_va->base.vm == avm && entry->is_mapped) {
                        pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
                                        entry->va,
                                        entry->va + bo_size,
 
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
                                      struct dma_buf *dma_buf,
-                                     uint64_t va, void *vm,
+                                     uint64_t va, void *drm_priv,
                                      struct kgd_mem **mem, uint64_t *size,
                                      uint64_t *mmap_offset)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+       struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;
-       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 
        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                /* Can't handle non-graphics buffers */
 
 
        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                dev->kgd, args->va_addr, args->size,
-               pdd->vm, (struct kgd_mem **) &mem, &offset,
+               pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
                flags);
 
        if (err)
                        goto get_mem_obj_from_handle_failed;
                }
                err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-                       peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+                       peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
                if (err) {
                        pr_err("Failed to map to gpu %d/%d\n",
                               i, args->n_devices);
                        goto get_mem_obj_from_handle_failed;
                }
                err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-                       peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+                       peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
                if (err) {
                        pr_err("Failed to unmap from gpu %d/%d\n",
                               i, args->n_devices);
        }
 
        r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
-                                             args->va_addr, pdd->vm,
+                                             args->va_addr, pdd->drm_priv,
                                              (struct kgd_mem **)&mem, &size,
                                              NULL);
        if (r)
 
 {
        struct kfd_dev *dev = pdd->dev;
 
-       amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
+       amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
 }
 
        int err;
 
        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
-                                                pdd->vm, &mem, NULL, flags);
+                                                pdd->drm_priv, &mem, NULL, flags);
        if (err)
                goto err_alloc_mem;
 
-       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
        if (err)
                goto err_map_mem;
 
                for (i = 0; i < p->n_pdds; i++) {
                        struct kfd_process_device *peer_pdd = p->pdds[i];
 
-                       if (!peer_pdd->vm)
+                       if (!peer_pdd->drm_priv)
                                continue;
                        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-                               peer_pdd->dev->kgd, mem, peer_pdd->vm);
+                               peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
                }
 
                amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
 
                if (pdd->drm_file) {
                        amdgpu_amdkfd_gpuvm_release_process_vm(
-                                       pdd->dev->kgd, pdd->vm);
+                                       pdd->dev->kgd, pdd->drm_priv);
                        fput(pdd->drm_file);
                }
 
        if (!drm_file)
                return -EINVAL;
 
-       if (pdd->vm)
+       if (pdd->drm_priv)
                return -EBUSY;
 
        p = pdd->process;
 
        ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
                dev->kgd, drm_file, p->pasid,
-               &pdd->vm, &p->kgd_process_info, &p->ef);
+               &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
        }
-
-       amdgpu_vm_set_task_info(pdd->vm);
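+       /* The drm_file private data is the opaque handle passed to all
+        * amdgpu_amdkfd_gpuvm_* calls from now on
+        */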
+       pdd->drm_priv = drm_file->private_data;
 
        ret = kfd_process_device_reserve_ib_mem(pdd);
        if (ret)
 err_init_cwsr:
 err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
-       pdd->vm = NULL;
+       pdd->drm_priv = NULL;
 
        return ret;
 }
                return ERR_PTR(-ENOMEM);
        }
 
-       if (!pdd->vm)
+       if (!pdd->drm_priv)
                return ERR_PTR(-ENODEV);
 
        /*