        r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
        if (r) {
-               DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+               drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
                return r;
        }
 
        r = amdgpu_bo_reserve(userq_obj->obj, true);
        if (r) {
-               DRM_ERROR("Failed to reserve BO to map (%d)", r);
+               drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
                goto free_obj;
        }
 
        r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
        if (r) {
-               DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
+               drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
                goto unresv;
        }
 
        r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
        if (r) {
-               DRM_ERROR("Failed to map BO for userqueue (%d)", r);
+               drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
                goto unresv;
        }
 
 
        gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
        if (gobj == NULL) {
-               DRM_ERROR("Can't find GEM object for doorbell\n");
+               drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
                return -EINVAL;
        }
 
        /* Pin the BO before generating the index, unpin in queue destroy */
        r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
        if (r) {
-               DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+               drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
                goto unref_bo;
        }
 
        r = amdgpu_bo_reserve(db_obj->obj, true);
        if (r) {
-               DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+               drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
                goto unpin_bo;
        }
 
                break;
 
        default:
-               DRM_ERROR("[Usermode queues] IP %d not support\n", db_info->queue_type);
+               drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
+                            db_info->queue_type);
                r = -EINVAL;
                goto unpin_bo;
        }
        if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
            args->in.ip_type != AMDGPU_HW_IP_DMA &&
            args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
-               DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
+               drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
+                            args->in.ip_type);
                return -EINVAL;
        }
 
            (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
            (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
            !amdgpu_is_tmz(adev)) {
-               drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n");
+               drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
                return -EINVAL;
        }
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
+               drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        uq_funcs = adev->userq_funcs[args->in.ip_type];
        if (!uq_funcs) {
-               DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
+               drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+                            args->in.ip_type);
                r = -EINVAL;
                goto unlock;
        }
 
        queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
        if (!queue) {
-               DRM_ERROR("Failed to allocate memory for queue\n");
+               drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
                r = -ENOMEM;
                goto unlock;
        }
        /* Convert relative doorbell offset into absolute doorbell index */
        index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
        if (index == (uint64_t)-EINVAL) {
-               DRM_ERROR("Failed to get doorbell for queue\n");
+               drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
                kfree(queue);
                goto unlock;
        }
        xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
        r = amdgpu_userq_fence_driver_alloc(adev, queue);
        if (r) {
-               DRM_ERROR("Failed to alloc fence driver\n");
+               drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
                goto unlock;
        }
 
        r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
        if (r) {
-               DRM_ERROR("Failed to create Queue\n");
+               drm_file_err(uq_mgr->file, "Failed to create Queue\n");
                amdgpu_userq_fence_driver_free(queue);
                kfree(queue);
                goto unlock;
 
        qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
        if (qid < 0) {
-               DRM_ERROR("Failed to allocate a queue id\n");
+               drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
                amdgpu_userq_fence_driver_free(queue);
                uq_funcs->mqd_destroy(uq_mgr, queue);
                kfree(queue);
                r = amdgpu_userq_map_helper(uq_mgr, queue);
                if (r) {
                        mutex_unlock(&adev->userq_mutex);
-                       DRM_ERROR("Failed to map Queue\n");
+                       drm_file_err(uq_mgr->file, "Failed to map Queue\n");
                        idr_remove(&uq_mgr->userq_idr, qid);
                        amdgpu_userq_fence_driver_free(queue);
                        uq_funcs->mqd_destroy(uq_mgr, queue);
                        return -EINVAL;
                r = amdgpu_userq_create(filp, args);
                if (r)
-                       DRM_ERROR("Failed to create usermode queue\n");
+                       drm_file_err(filp, "Failed to create usermode queue\n");
                break;
 
        case AMDGPU_USERQ_OP_FREE:
                        return -EINVAL;
                r = amdgpu_userq_destroy(filp, args->in.queue_id);
                if (r)
-                       DRM_ERROR("Failed to destroy usermode queue\n");
+                       drm_file_err(filp, "Failed to destroy usermode queue\n");
                break;
 
        default:
 static int
 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
 {
-       struct amdgpu_device *adev = uq_mgr->adev;
        struct amdgpu_usermode_queue *queue;
        int queue_id;
        int ret = 0, r;
        }
 
        if (ret)
-               dev_err(adev->dev, "Failed to map all the queues\n");
+               drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
        return ret;
 }
 
                ret = amdgpu_vm_lock_pd(vm, &exec, 2);
                drm_exec_retry_on_contention(&exec);
                if (unlikely(ret)) {
-                       DRM_ERROR("Failed to lock PD\n");
+                       drm_file_err(uq_mgr->file, "Failed to lock PD\n");
                        goto unlock_all;
                }
 
                bo = bo_va->base.bo;
                ret = amdgpu_userq_validate_vm_bo(NULL, bo);
                if (ret) {
-                       DRM_ERROR("Failed to validate BO\n");
+                       drm_file_err(uq_mgr->file, "Failed to validate BO\n");
                        goto unlock_all;
                }
 
 
        ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
        if (ret)
-               DRM_ERROR("Failed to replace eviction fence\n");
+               drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
 
 unlock_all:
        drm_exec_fini(&exec);
 
        ret = amdgpu_userq_validate_bos(uq_mgr);
        if (ret) {
-               DRM_ERROR("Failed to validate BOs to restore\n");
+               drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
                goto unlock;
        }
 
        ret = amdgpu_userq_restore_all(uq_mgr);
        if (ret) {
-               DRM_ERROR("Failed to restore all queues\n");
+               drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
                goto unlock;
        }
 
 static int
 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
 {
-       struct amdgpu_device *adev = uq_mgr->adev;
        struct amdgpu_usermode_queue *queue;
        int queue_id;
        int ret = 0, r;
        }
 
        if (ret)
-               dev_err(adev->dev, "Couldn't unmap all the queues\n");
+               drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
        return ret;
 }
 
        /* Wait for any pending userqueue fence work to finish */
        ret = amdgpu_userq_wait_for_signal(uq_mgr);
        if (ret) {
-               DRM_ERROR("Not evicting userqueue, timeout waiting for work\n");
+               drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
                return;
        }
 
        ret = amdgpu_userq_evict_all(uq_mgr);
        if (ret) {
-               DRM_ERROR("Failed to evict userqueue\n");
+               drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
                return;
        }