q->ring_ops->emit_job(job);
xe_execlist_make_active(exl);
- return dma_fence_get(job->fence);
+ return job->fence;
}
static void execlist_job_free(struct drm_sched_job *drm_job)
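A minimal sketch of the ownership rule the execlist hunk above switches to, assuming, per the "Drop ref from xe_sched_job_arm" comment in the GuC hunk below, that xe_sched_job_arm() already takes the dma_fence reference that ->run_job() hands back; example_run_job() is a hypothetical name, not the driver's function.

/* Hypothetical illustration, not driver code: under the new rule ->run_job()
 * returns the reference that xe_sched_job_arm() took on job->fence instead of
 * taking an extra one with dma_fence_get(). */
static struct dma_fence *example_run_job(struct xe_sched_job *job)
{
	/* Ownership of the arm-time reference transfers to the DRM scheduler. */
	return job->fence;
}
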
static struct dma_fence *guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
+ struct dma_fence *fence = NULL;
bool lr = xe_exec_queue_is_lr(q);
xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
	  exec_queue_banned(q) || exec_queue_suspended(q));
if (lr) {
xe_sched_job_set_error(job, -EOPNOTSUPP);
- return NULL;
- } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
- return job->fence;
+ dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
} else {
- return dma_fence_get(job->fence);
+ fence = job->fence;
}
+
+ return fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
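The header hunk below removes JOB_FLAG_SUBMIT, which this change leaves without users: the bit previously marked a fence whose reference had already been handed to the scheduler, so only the first submission took one, roughly as sketched here with a hypothetical helper name.

/* Hypothetical sketch of the old run_job tail, for contrast only: */
static struct dma_fence *old_run_job_tail(struct xe_sched_job *job)
{
	/* Resubmission: the reference was already handed to the scheduler. */
	if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags))
		return job->fence;

	/* First submission: take the reference the scheduler will consume. */
	return dma_fence_get(job->fence);
}
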
* @fence: dma fence to indicate completion. 1 way relationship - job
* can safely reference fence, fence cannot safely reference job.
*/
-#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS
struct dma_fence *fence;
/** @user_fence: write back value when BB is complete */
struct {