        struct xe_exec_queue *q = ge->q;
        struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_gpu_scheduler *sched = &ge->sched;
-       bool wedged;
+       bool wedged = false;
 
        xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
        trace_xe_exec_queue_lr_cleanup(q);
 
-       wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
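+       /* Killed queues are cleaned up on purpose; don't hint a wedge */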
+       if (!exec_queue_killed(q))
+               wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
 
        /* Kill the run_job / process_msg entry points */
        xe_sched_submission_stop(sched);
        int err = -ETIME;
        pid_t pid = -1;
        int i = 0;
-       bool wedged, skip_timeout_check;
+       bool wedged = false, skip_timeout_check;
 
        /*
         * XXX: Sampling timeout doesn't work in wedged mode as we have to
         * modify scheduling state to read timestamp. We could read the
         * timestamp from a register to accumulate current running time but
         * this doesn't work for SRIOV. For now assuming timeouts in wedged
         * mode are genuine timeouts.
         */
-       wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
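+       /* Timeouts on killed queues are expected; don't hint a wedge */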
+       if (!exec_queue_killed(q))
+               wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
 
        /* Engine state now stable, disable scheduling to check timestamp */
        if (!wedged && exec_queue_registered(q)) {