return ret;
 }
 
-static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+/* unmap_bo_from_gpuvm - Tear down one per-GPU attachment's VA mapping.
+ *
+ * Removes @entry->va from the attachment's VM, queues the resulting
+ * page-table-update fence on @sync, and finally releases the DMA
+ * mapping via kfd_mem_dmaunmap_attachment().  The GPU device is now
+ * derived from @entry->adev rather than passed in, and the return type
+ * becomes void because the old int version unconditionally returned 0.
+ */
+static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
                                struct kfd_mem_attachment *entry,
                                struct amdgpu_sync *sync)
 {
        struct amdgpu_bo_va *bo_va = entry->bo_va;
+       struct amdgpu_device *adev = entry->adev;
        struct amdgpu_vm *vm = bo_va->base.vm;
+       /* NOTE(review): 'vm' is only declared, never used, in this hunk —
+        * possibly a use of it was dropped from this view; confirm against
+        * the full file before removing it. */
 
        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
 
        amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
-       return 0;
+       kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
-static int update_gpuvm_pte(struct amdgpu_device *adev,
-               struct kfd_mem_attachment *entry,
-               struct amdgpu_sync *sync)
+/* update_gpuvm_pte - (Re)write the page-table entries for one attachment.
+ *
+ * First DMA-maps the attachment (kfd_mem_dmamap_attachment), then asks
+ * the VM layer to update the PTEs and queues the resulting fence on
+ * @sync.  The GPU device is now derived from @entry->adev instead of a
+ * separate parameter, so callers pass the kgd_mem instead.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int update_gpuvm_pte(struct kgd_mem *mem,
+                           struct kfd_mem_attachment *entry,
+                           struct amdgpu_sync *sync)
 {
-       int ret;
        struct amdgpu_bo_va *bo_va = entry->bo_va;
+       struct amdgpu_device *adev = entry->adev;
+       int ret;
+
+       ret = kfd_mem_dmamap_attachment(mem, entry);
+       if (ret)
+               return ret;
 
        /* Update the page tables  */
        ret = amdgpu_vm_bo_update(adev, bo_va, false);
+       /* NOTE(review): 'ret' from amdgpu_vm_bo_update() is never checked
+        * before the return below — this hunk looks truncated; confirm the
+        * error-check lines exist in the applied patch. */
        return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
-static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-               struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
-               bool no_update_pte)
+/* map_bo_to_gpuvm - Map one attachment into its GPU VM.
+ *
+ * Reserves the VA range with amdgpu_vm_bo_map() using the attachment's
+ * own device (entry->adev), then — unless @no_update_pte — writes the
+ * PTEs via update_gpuvm_pte().  If the PTE update fails, the VA mapping
+ * is rolled back with unmap_bo_from_gpuvm().
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): this hunk appears truncated in this view — the bodies
+ * closing the two "if (ret) {" error branches are not visible; verify
+ * the full patch before relying on the control flow shown here.
+ */
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+                          struct kfd_mem_attachment *entry,
+                          struct amdgpu_sync *sync,
+                          bool no_update_pte)
 {
        int ret;
 
        /* Set virtual address for the allocation */
-       ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+       ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
                               amdgpu_bo_size(entry->bo_va->base.bo),
                               entry->pte_flags);
        if (ret) {
        if (no_update_pte)
                return 0;
 
-       ret = update_gpuvm_pte(adev, entry, sync);
+       ret = update_gpuvm_pte(mem, entry, sync);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
        return 0;
 
 update_gpuvm_pte_failed:
-       unmap_bo_from_gpuvm(mem, entry, sync);
+       unmap_bo_from_gpuvm(mem, entry, sync);
        return ret;
 }
 
                pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
                         entry->va, entry->va + bo_size, entry);
 
-               ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+               ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
                                      is_invalid_userptr);
                if (ret) {
                        pr_err("Failed to map bo to gpuvm\n");
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
-       struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        struct amdkfd_process_info *process_info = avm->process_info;
        unsigned long bo_size = mem->bo->tbo.base.size;
                pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
                         entry->va, entry->va + bo_size, entry);
 
-               ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
-               if (ret == 0) {
-                       entry->is_mapped = false;
-               } else {
-                       pr_err("failed to unmap VA 0x%llx\n", mem->va);
-                       goto unreserve_out;
-               }
+               unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+               entry->is_mapped = false;
 
                mem->mapped_to_gpu_memory--;
                pr_debug("\t DEC mapping count %d\n",
                        if (!attachment->is_mapped)
                                continue;
 
-                       ret = update_gpuvm_pte((struct amdgpu_device *)
-                                              attachment->adev,
-                                              attachment, &sync);
+                       kfd_mem_dmaunmap_attachment(mem, attachment);
+                       ret = update_gpuvm_pte(mem, attachment, &sync);
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
                        goto validate_map_fail;
                }
                list_for_each_entry(attachment, &mem->attachments, list) {
-                       ret = update_gpuvm_pte((struct amdgpu_device *)
-                                             attachment->adev, attachment,
-                                             &sync_obj);
+                       if (!attachment->is_mapped)
+                               continue;
+
+                       kfd_mem_dmaunmap_attachment(mem, attachment);
+                       ret = update_gpuvm_pte(mem, attachment, &sync_obj);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;