Update the XCP ref_cnt before the ctx entity is freed. The new
amdgpu_xcp_release_sched() helper decrements the ref_cnt of the partition
that owns the entity's scheduler, and amdgpu_ctx_fini_entity() now calls it
ahead of kfree(entity) so the partition's reference count stays balanced.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Acked-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
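
For reference, a minimal sketch of the reference-count pairing this change
completes, assuming the increment is taken in the ASIC-specific select_scheds
callback reached through the amdgpu_xcp_select_scheds() macro;
example_bind_entity() and example_unbind_entity() are illustrative names
only, not functions in the driver:

/*
 * Illustrative sketch, not part of this patch: the partition ref_cnt is
 * taken when an entity is bound to one of the partition's schedulers and
 * must be dropped again before the entity is freed, which is what the new
 * amdgpu_xcp_release_sched() helper does.
 */
static void example_bind_entity(struct amdgpu_device *adev, int xcp_id)
{
        /* assumed to happen in the select_scheds path */
        atomic_inc(&adev->xcp_mgr->xcp[xcp_id].ref_cnt);
}

static void example_unbind_entity(struct amdgpu_device *adev,
                                  struct amdgpu_ctx_entity *entity)
{
        struct amdgpu_ring *ring;

        if (!adev->xcp_mgr)
                return;

        /* mirrors amdgpu_xcp_release_sched(), called before kfree(entity) */
        ring = to_amdgpu_ring(entity->entity.rq->sched);
        atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
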
        return r;
 }
 
-static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
+                                 struct amdgpu_ctx_entity *entity)
 {
        ktime_t res = ns_to_ktime(0);
        int i;
                dma_fence_put(entity->fences[i]);
        }
 
+       amdgpu_xcp_release_sched(adev, entity);
+
        kfree(entity);
        return res;
 }
                for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
                        ktime_t spend;
 
-                       spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+                       spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
                        atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
                }
        }
 
        return 0;
 }
 
+/* Drop the XCP ref_cnt held for the partition that owns the entity's
+ * scheduler. Called from amdgpu_ctx_fini_entity() before the entity
+ * is freed.
+ */
+void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
+                             struct amdgpu_ctx_entity *entity)
+{
+       struct drm_gpu_scheduler *sched;
+       struct amdgpu_ring *ring;
+
+       if (!adev->xcp_mgr)
+               return;
+
+       sched = entity->entity.rq->sched;
+       if (sched->ready) {
+               ring = to_amdgpu_ring(sched);
+               atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
+       }
+}
+
 
 int amdgpu_xcp_open_device(struct amdgpu_device *adev,
                           struct amdgpu_fpriv *fpriv,
                           struct drm_file *file_priv);
+void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
+                             struct amdgpu_ctx_entity *entity);
 
 #define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
        ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \