struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
                struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
 
        return ret;
 }
 
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
 {
        struct kfd_mem_attachment *entry;
        struct amdgpu_vm *vm;
+       int ret;
 
        vm = drm_priv_to_vm(drm_priv);
 
        mutex_lock(&mem->lock);
 
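+       /* Reserve the BO before touching its attachments; bail out if that fails */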
+       ret = amdgpu_bo_reserve(mem->bo, true);
+       if (ret)
+               goto out;
+
        list_for_each_entry(entry, &mem->attachments, list) {
-               if (entry->bo_va->base.vm == vm)
-                       kfd_mem_dmaunmap_attachment(mem, entry);
+               if (entry->bo_va->base.vm != vm)
+                       continue;
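+               /* A ttm without an sg table has no DMA mapping to tear down */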
+               if (entry->bo_va->base.bo->tbo.ttm &&
+                   !entry->bo_va->base.bo->tbo.ttm->sg)
+                       continue;
+
+               kfd_mem_dmaunmap_attachment(mem, entry);
        }
 
+       amdgpu_bo_unreserve(mem->bo);
+out:
        mutex_unlock(&mem->lock);
+
+       return ret;
 }
 
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 
                        kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
 
                /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
-               amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+               err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+               if (err)
+                       goto sync_memory_failed;
        }
 
        mutex_unlock(&p->mutex);
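
For readers less familiar with the error-handling style used above: the new code follows the usual kernel goto-unwind idiom, where the function takes the outer lock, attempts the inner acquisition, and on failure jumps to a label that releases only what was actually taken before returning the error. Below is a minimal, self-contained userspace sketch of that flow; the names are hypothetical and pthread mutexes merely stand in for mem->lock and the BO reservation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mem_lock = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for mem->lock */
static pthread_mutex_t bo_resv  = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for the BO reservation */

/* Hypothetical helper mirroring the dmaunmap flow: 0 on success,
 * a negative value if the inner "reservation" cannot be taken. */
static int dmaunmap_sketch(void)
{
        int ret;

        pthread_mutex_lock(&mem_lock);

        ret = pthread_mutex_trylock(&bo_resv);  /* plays the role of amdgpu_bo_reserve() */
        if (ret) {
                ret = -ret;                     /* propagate the failure to the caller */
                goto out;
        }

        /* ... per-attachment unmap work would go here ... */

        pthread_mutex_unlock(&bo_resv);         /* plays the role of amdgpu_bo_unreserve() */
out:
        pthread_mutex_unlock(&mem_lock);        /* always released, success or failure */
        return ret;
}

int main(void)
{
        printf("dmaunmap_sketch() = %d\n", dmaunmap_sketch());
        return 0;
}

As in the patch, the caller is expected to check the returned value rather than assuming the unmap always succeeds.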