#include "xe_guc_ct.h"
 #include "xe_guc_submit.h"
 #include "xe_hw_engine.h"
+#include "xe_sched_job.h"
 
 /**
  * DOC: Xe device coredump
 }
 
 static void devcoredump_snapshot(struct xe_devcoredump *coredump,
-                                struct xe_exec_queue *q)
+                                struct xe_sched_job *job)
 {
        struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
+       struct xe_exec_queue *q = job->q;
        struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
 
        coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
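+       /* Capture the GuC submission state of the faulty job's exec queue */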
-       coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
+       coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(job);
 
        for_each_hw_engine(hwe, q->gt, id) {
                if (hwe->class != q->hwe->class ||
 
 /**
  * xe_devcoredump - Take the required snapshots and initialize coredump device.
- * @q: The faulty xe_exec_queue, where the issue was detected.
+ * @job: The faulty xe_sched_job, where the issue was detected.
  *
  * This function should be called at the crash time within the serialized
  * gt_reset. It is skipped if we still have the core dump device available
  * with the information of the 'first' snapshot.
  */
-void xe_devcoredump(struct xe_exec_queue *q)
+void xe_devcoredump(struct xe_sched_job *job)
 {
-       struct xe_device *xe = gt_to_xe(q->gt);
+       struct xe_device *xe = gt_to_xe(job->q->gt);
        struct xe_devcoredump *coredump = &xe->devcoredump;
 
        if (coredump->captured) {
        }
 
        coredump->captured = true;
-       devcoredump_snapshot(coredump, q);
+       devcoredump_snapshot(coredump, job);
 
        drm_info(&xe->drm, "Xe device coredump has been created\n");
        drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
 
                drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
                           xe_sched_job_seqno(job), q->guc->id, q->flags);
                simple_error_capture(q);
-               xe_devcoredump(q);
+               xe_devcoredump(job);
        } else {
                drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
                         xe_sched_job_seqno(job), q->guc->id, q->flags);
 
 /**
  * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
- * @q: Xe exec queue.
+ * @job: The faulty xe_sched_job.
  *
  * This can be printed out in a later stage like during dev_coredump
  * analysis.
  * caller, using `xe_guc_exec_queue_snapshot_free`.
  */
 struct xe_guc_submit_exec_queue_snapshot *
-xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
+xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job)
 {
+       struct xe_exec_queue *q = job->q;
        struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
        struct xe_gpu_scheduler *sched = &q->guc->sched;
-       struct xe_sched_job *job;
        struct xe_guc_submit_exec_queue_snapshot *snapshot;
        int i;
 
        if (!snapshot->pending_list) {
                drm_err(&xe->drm, "Skipping GuC Engine pending_list snapshot.\n");
        } else {
+               struct xe_sched_job *job_iter;
+
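+               /* Record the seqno and fence state of every job still pending */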
                i = 0;
-               list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+               list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
                        snapshot->pending_list[i].seqno =
-                               xe_sched_job_seqno(job);
+                               xe_sched_job_seqno(job_iter);
                        snapshot->pending_list[i].fence =
-                               dma_fence_is_signaled(job->fence) ? 1 : 0;
+                               dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
                        snapshot->pending_list[i].finished =
-                               dma_fence_is_signaled(&job->drm.s_fence->finished)
+                               dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
                                ? 1 : 0;
                        i++;
                }
 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
 {
        struct xe_guc_submit_exec_queue_snapshot *snapshot;
+       struct xe_gpu_scheduler *sched = &q->guc->sched;
+       struct xe_sched_job *job;
+       bool found = false;
+
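+       /*
+        * xe_guc_exec_queue_snapshot_capture() works on a job, so look for a
+        * job of @q still on the pending list and hold a reference to it for
+        * the duration of the capture and print.
+        */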
+       spin_lock(&sched->base.job_list_lock);
+       list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+               if (job->q == q) {
+                       xe_sched_job_get(job);
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock(&sched->base.job_list_lock);
 
-       snapshot = xe_guc_exec_queue_snapshot_capture(q);
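+       /* No job left on the pending list, nothing to capture or print */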
+       if (!found)
+               return;
+
+       snapshot = xe_guc_exec_queue_snapshot_capture(job);
        xe_guc_exec_queue_snapshot_print(snapshot, p);
        xe_guc_exec_queue_snapshot_free(snapshot);
+
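+       /* Drop the reference taken while walking the pending list */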
+       xe_sched_job_put(job);
 }
 
 /**