{
struct nouveau_sched *sched = job->sched;
- spin_lock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
list_del(&job->entry);
- spin_unlock(&sched->job.list.lock);
-
- wake_up(&sched->job.wq);
+ spin_unlock(&sched->job_list.lock);
}
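/*
 * Job completion now only unlinks the finished job under job_list.lock.
 * The wake_up() can go because, with the wait_event() removed from the
 * teardown path further down, nothing sleeps on an empty job list anymore.
 */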
void
/* Submit was successful; add the job to the scheduler's job list. */
- spin_lock(&sched->job.list.lock);
- list_add(&job->entry, &sched->job.list.head);
- spin_unlock(&sched->job.list.lock);
+ spin_lock(&sched->job_list.lock);
+ list_add(&job->entry, &sched->job_list.head);
+ spin_unlock(&sched->job_list.lock);
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
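/*
 * An observation, not stated in the patch itself: the job is put on
 * job_list before drm_sched_job_arm() hands it to the scheduler, so
 * anything walking job_list under its lock cannot miss a job the
 * scheduler already knows about.
 */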
mutex_init(&sched->mutex);
- spin_lock_init(&sched->job.list.lock);
- INIT_LIST_HEAD(&sched->job.list.head);
- init_waitqueue_head(&sched->job.wq);
+ spin_lock_init(&sched->job_list.lock);
+ INIT_LIST_HEAD(&sched->job_list.head);
return 0;
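/*
 * Implied layout after this change (a sketch reconstructed from the
 * accesses above, not the verbatim header; the real struct carries
 * further fields):
 *
 *	struct nouveau_sched {
 *		...
 *		struct mutex mutex;
 *		struct {
 *			struct list_head head;
 *			spinlock_t lock;
 *		} job_list;
 *	};
 *
 * The wait_queue_head_t of the old nested "job" struct has no successor,
 * which is why init_waitqueue_head() disappears here.
 */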
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
-
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
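/*
 * Teardown no longer open-codes a wait for job_list to drain; the
 * rmb()/wait_event() pair is gone. Presumably any outstanding jobs are
 * now expected to be dealt with by drm_sched_fini() and the scheduler
 * core's own teardown handling instead.
 */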
u64 end = addr + range;
again:
- spin_lock(&sched->job.list.lock);
- list_for_each_entry(__job, &sched->job.list.head, entry) {
+ spin_lock(&sched->job_list.lock);
+ list_for_each_entry(__job, &sched->job_list.head, entry) {
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
list_for_each_op(op, &bind_job->ops) {
if (!(end <= op_addr || addr >= op_end)) {
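/*
 * Standard interval-overlap test: [addr, end) and [op_addr, op_end)
 * intersect. op_addr/op_end come from the elided per-op setup above.
 */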
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
}
}
}
- spin_unlock(&sched->job.list.lock);
+ spin_unlock(&sched->job_list.lock);
}
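/*
 * The loop above cannot sleep while holding job_list.lock, so on a hit it
 * takes a reference on bind_job, drops the lock, blocks in
 * wait_for_completion(), and then restarts the whole walk at "again:",
 * since the list may have changed while it was unlocked.
 */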
static int