        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;
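+       /* Shared time budget for draining all entity queues below. */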
+       long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
        idp = &mgr->ctx_handles;
 
+       mutex_lock(&mgr->lock);
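+       /* mgr->lock keeps the ctx_handles idr stable while it is walked. */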
        idr_for_each_entry(idp, ctx, id) {
 
-               if (!ctx->adev)
+               if (!ctx->adev) {
+                       mutex_unlock(&mgr->lock);
                        return;
+               }
 
                for (i = 0; i < ctx->adev->num_rings; i++) {
 
                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;
 
-                       if (kref_read(&ctx->refcount) == 1)
-                               drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-                                                 &ctx->rings[i].entity);
-                       else
-                               DRM_ERROR("ctx %p is still alive\n", ctx);
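+                       /*
+                        * drm_sched_entity_do_release() returns the unused
+                        * remainder of max_wait, so the total wait across
+                        * all entities stays bounded.
+                        */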
+                       max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+                                         &ctx->rings[i].entity, max_wait);
                }
        }
+       mutex_unlock(&mgr->lock);
 }
 
 void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
 
        .runtime_idle = amdgpu_pmops_runtime_idle,
 };
 
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+       struct drm_file *file_priv = f->private_data;
+       struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
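+       /* Wait for the contexts' entity queues to drain before the fd closes. */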
+       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+       return 0;
+}
+
 static const struct file_operations amdgpu_driver_kms_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
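+       /* Drain pending jobs on every close(); see amdgpu_flush(). */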
+       .flush = amdgpu_flush,
        .release = drm_release,
        .unlocked_ioctl = amdgpu_drm_ioctl,
        .mmap = amdgpu_mmap,
 
                return;
 
        pm_runtime_get_sync(dev->dev);
-       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
        if (adev->asic_type != CHIP_RAVEN) {
                amdgpu_uvd_free_handles(adev, file_priv);