if (xe_exec_queue_is_parallel(q)) {
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
- q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
+ q->parallel.composite_fence_seqno = 0;
}
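With the job seqno decoupled from the LRC seqno, the composite fence timeline of a parallel queue only has to be monotonic within its own dma-fence context; it no longer needs to start at XE_FENCE_INITIAL_SEQNO to stay in step with the hardware seqnos, so 0 works. A minimal sketch of the pattern, with hypothetical names (parallel_timeline and its init helper are illustration only, not driver code):

    #include <linux/dma-fence.h>

    struct parallel_timeline {
            u64 ctx;        /* private context for the composite fences */
            u32 seqno;      /* composite seqno; only monotonicity matters */
    };

    static void parallel_timeline_init(struct parallel_timeline *tl)
    {
            /* dma_fence_context_alloc() hands out unique 64-bit contexts */
            tl->ctx = dma_fence_context_alloc(1);
            tl->seqno = 0;
    }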
return q;
return DRM_GPU_SCHED_STAT_NOMINAL;
}
- drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
- xe_sched_job_seqno(job), q->guc->id, q->flags);
+ drm_notice(&xe->drm, "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags);
xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
"Kernel-submitted job timed out\n");
xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
__emit_job_gen12_simple(job, job->q->lrc,
job->batch_addr[0],
- xe_sched_job_seqno(job));
+ xe_sched_job_lrc_seqno(job));
}
static void emit_job_gen12_copy(struct xe_sched_job *job)
if (xe_sched_job_is_migration(job->q)) {
emit_migration_job_gen12(job, job->q->lrc,
- xe_sched_job_seqno(job));
+ xe_sched_job_lrc_seqno(job));
return;
}
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_simple(job, job->q->lrc + i,
- job->batch_addr[i],
- xe_sched_job_seqno(job));
+ job->batch_addr[i],
+ xe_sched_job_lrc_seqno(job));
}
static void emit_job_gen12_video(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_video(job, job->q->lrc + i,
job->batch_addr[i],
- xe_sched_job_seqno(job));
+ xe_sched_job_lrc_seqno(job));
}
static void emit_job_gen12_render_compute(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_render_compute(job, job->q->lrc + i,
job->batch_addr[i],
- xe_sched_job_seqno(job));
+ xe_sched_job_lrc_seqno(job));
}
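All four emit_job_gen12_* paths above make the same substitution: the seqno baked into the ring, and written back by the hardware when the job finishes, must be the per-LRC value, while xe_sched_job_seqno() now names only the dma-fence timeline. A condensed sketch of the split, reusing the four-argument emit shape visible in the hunks (emit_one itself is a hypothetical wrapper):

    /* illustration only: the hardware sees lrc_seqno, never the fence seqno */
    static void emit_one(struct xe_sched_job *job, struct xe_lrc *lrc,
                         u64 batch_addr)
    {
            u32 hw_seqno = xe_sched_job_lrc_seqno(job);     /* into the ring */

            __emit_job_gen12_simple(job, lrc, batch_addr, hw_seqno);
            /* xe_sched_job_seqno(job) stays host-side (fences, tracing) */
    }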
static const struct xe_ring_ops ring_ops_gen12_gsc = {
err = PTR_ERR(job->fence);
goto err_sched_job;
}
+ job->lrc_seqno = job->fence->seqno;
} else {
struct dma_fence_array *cf;
err = PTR_ERR(fences[j]);
goto err_fences;
}
+ if (!j)
+ job->lrc_seqno = fences[0]->seqno;
}
cf = dma_fence_array_create(q->width, fences,
goto err_fences;
}
- /* Sanity check */
- for (j = 0; j < q->width; ++j)
- xe_assert(job_to_xe(job), cf->base.seqno == fences[j]->seqno);
-
job->fence = &cf->base;
}
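Two fence shapes fall out of job creation now. For a single-LRC queue the job fence is the hardware seqno fence itself, so lrc_seqno is a straight copy of its seqno. For a parallel queue, the q->width LRCs advance in lockstep (every job on the queue emits to each of them), so fences[0]->seqno stands for all children, while the dma_fence_array wrapping them lives on the independent composite context; that independence is why the old sanity check comparing cf->base.seqno against every child seqno had to go. A sketch under those assumptions (attach_job_fence is hypothetical, child fence creation is elided, and the increment order of the composite seqno is illustrative):

    #include <linux/dma-fence-array.h>

    static int attach_job_fence(struct xe_sched_job *job,
                                struct xe_exec_queue *q,
                                struct dma_fence **fences)
    {
            struct dma_fence_array *cf;

            if (!xe_exec_queue_is_parallel(q)) {
                    job->fence = fences[0];             /* hw seqno fence */
                    job->lrc_seqno = job->fence->seqno; /* same timeline */
                    return 0;
            }

            /* the children stay in lockstep; the first stands for all */
            job->lrc_seqno = fences[0]->seqno;

            cf = dma_fence_array_create(q->width, fences,
                                        q->parallel.composite_fence_ctx,
                                        q->parallel.composite_fence_seqno++,
                                        false);
            if (!cf)
                    return -ENOMEM;

            job->fence = &cf->base;     /* independent composite timeline */
            return 0;
    }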
{
struct xe_lrc *lrc = job->q->lrc;
- return !__dma_fence_is_later(xe_sched_job_seqno(job),
+ return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
xe_lrc_start_seqno(lrc),
- job->fence->ops);
+ dma_fence_array_first(job->fence)->ops);
}
bool xe_sched_job_completed(struct xe_sched_job *job)
* parallel handshake is done.
*/
- return !__dma_fence_is_later(xe_sched_job_seqno(job), xe_lrc_seqno(lrc),
- job->fence->ops);
+ return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
+ xe_lrc_seqno(lrc),
+ dma_fence_array_first(job->fence)->ops);
}
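Both checks above lean on two dma-fence helpers: __dma_fence_is_later() does the wrap-aware ordering test (a signed 32-bit difference unless the ops declare 64-bit seqnos), and dma_fence_array_first() peels a dma_fence_array down to its first child while passing a plain fence through unchanged. Taking the ops from the child matters because a parallel job's fence is now the composite array, whose ops describe the wrong seqno space. A condensed sketch of the shared pattern (lrc_seqno_passed is a hypothetical name):

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>

    /* has the hardware reached this job's LRC seqno yet? */
    static bool lrc_seqno_passed(u32 job_lrc_seqno, u32 hw_seqno,
                                 struct dma_fence *job_fence)
    {
            /* plain fence: returns job_fence; array: returns fences[0] */
            const struct dma_fence_ops *ops =
                    dma_fence_array_first(job_fence)->ops;

            /* "not later than hw_seqno" == already reached */
            return !__dma_fence_is_later(job_lrc_seqno, hw_seqno, ops);
    }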
void xe_sched_job_arm(struct xe_sched_job *job)
return job->fence->seqno;
}
+static inline u32 xe_sched_job_lrc_seqno(struct xe_sched_job *job)
+{
+ return job->lrc_seqno;
+}
+
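The accessor exists so call sites never reach for job->fence->seqno when they mean the hardware value; for a parallel job that would silently pick the composite timeline instead. A one-line illustration (hw_seqno_for_job is hypothetical):

    static u32 hw_seqno_for_job(struct xe_sched_job *job)
    {
            /* not xe_sched_job_seqno(): for parallel jobs that returns the
             * composite dma-fence seqno, not what the ring emitted */
            return xe_sched_job_lrc_seqno(job);
    }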
static inline void
xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
{
/** @user_fence.value: write back value */
u64 value;
} user_fence;
+ /** @lrc_seqno: LRC seqno, the hardware-visible seqno for this job */
+ u32 lrc_seqno;
/** @migrate_flush_flags: Additional flush flags for migration jobs */
u32 migrate_flush_flags;
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
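The new field gives each job a foot in two seqno spaces: job->fence->seqno advances on the dma-fence timeline (the composite context for parallel queues), while job->lrc_seqno is the hardware-visible value used by the ring and the completion checks. A hypothetical annotated summary (this struct is illustration, not driver code):

    struct xe_job_seqno_spaces {
            u64 fence_seqno;    /* job->fence->seqno: dma-fence timeline;
                                 * what xe_sched_job_seqno() returns */
            u32 lrc_seqno;      /* job->lrc_seqno: emitted to the ring and
                                 * checked against xe_lrc_seqno() */
    };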
TP_STRUCT__entry(
__field(u32, seqno)
+ __field(u32, lrc_seqno)
__field(u16, guc_id)
__field(u32, guc_state)
__field(u32, flags)
TP_fast_assign(
__entry->seqno = xe_sched_job_seqno(job);
+ __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
__entry->guc_id = job->q->guc->id;
__entry->guc_state =
atomic_read(&job->q->guc->state);
__entry->batch_addr = (u64)job->batch_addr[0];
),
- TP_printk("fence=%p, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
- __entry->fence, __entry->seqno, __entry->guc_id,
+ TP_printk("fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ __entry->fence, __entry->seqno,
+ __entry->lrc_seqno, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
);