The driver shouldn't mess with the scheduler internals: instead of amdgpu installing its own work_struct handler on the scheduler's work_tdr, add a timedout_job backend callback so the scheduler owns the delayed work and calls back into the driver on timeout.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk.Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
 
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
 
        p->job = NULL;
 
        r = amd_sched_job_init(&job->base, &ring->sched,
-                              entity, amdgpu_job_timeout_func,
-                              amdgpu_job_free_func,
+                              entity, amdgpu_job_free_func,
                               p->filp, &fence);
        if (r) {
                amdgpu_job_free(job);
 
        amd_sched_job_put(&job->base);
 }
 
-void amdgpu_job_timeout_func(struct work_struct *work)
+/*
+ * Timeout callback invoked by the scheduler through the new
+ * timedout_job backend op. The scheduler now owns the work_tdr
+ * delayed work, so this receives the amd_sched_job directly
+ * instead of unwrapping a work_struct.
+ */
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
+       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+
        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-                               job->base.sched->name,
-                               (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
-                               job->ring->fence_drv.sync_seq);
+                 job->base.sched->name,
+                 atomic_read(&job->ring->fence_drv.last_seq),
+                 job->ring->fence_drv.sync_seq);
 
+       /* NOTE(review): drops a job reference here — presumably taken when
+        * the timeout was armed; confirm against amd_sched_job_get users. */
        amd_sched_job_put(&job->base);
 }
                return -EINVAL;
 
        r = amd_sched_job_init(&job->base, &ring->sched,
-                              entity, amdgpu_job_timeout_func,
-                              amdgpu_job_free_func, owner, &fence);
+                              entity, amdgpu_job_free_func, owner, &fence);
        if (r)
                return r;
 
 const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
+       /* called by the scheduler's work_tdr handler when a job times out */
+       .timedout_job = amdgpu_job_timedout,
 };
 
        }
 }
 
+/*
+ * Generic timeout handler: runs from the scheduler's work_tdr delayed
+ * work and forwards the event to the backend's timedout_job op, keeping
+ * the work_struct plumbing inside the scheduler.
+ */
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+       struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+                                                work_tdr.work);
+
+       job->sched->ops->timedout_job(job);
+}
+
 /**
  * Submit a job to the job queue
  *
 int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
-                      void (*timeout_cb)(struct work_struct *work),
                       void (*free_cb)(struct kref *refcount),
                       void *owner, struct fence **fence)
 {
                return -ENOMEM;
 
        job->s_fence->s_job = job;
-       INIT_DELAYED_WORK(&job->work_tdr, timeout_cb);
+       INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
        job->free_callback = free_cb;
 
        if (fence)
 
 struct amd_sched_backend_ops {
        struct fence *(*dependency)(struct amd_sched_job *sched_job);
        struct fence *(*run_job)(struct amd_sched_job *sched_job);
+       /* invoked by amd_sched_job_timedout when the TDR timer fires */
+       void (*timedout_job)(struct amd_sched_job *sched_job);
 };
 
 enum amd_sched_priority {
 int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
-                      void (*timeout_cb)(struct work_struct *work),
                       void (*free_cb)(struct kref* refcount),
                       void *owner, struct fence **fence);
 static inline void amd_sched_job_get(struct amd_sched_job *job)