        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link)
-               i915_request_mark_eio(rq);
+               i915_request_put(i915_request_mark_eio(rq));
        intel_engine_signal_breadcrumbs(engine);
 
        /* Flush the queued requests to the timeline list (for retiring). */
                struct i915_priolist *p = to_priolist(rb);
 
                priolist_for_each_request_consume(rq, rn, p) {
-                       i915_request_mark_eio(rq);
-                       __i915_request_submit(rq);
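+                       /* mark_eio() returns a held reference to drop after the submit */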
+                       if (i915_request_mark_eio(rq)) {
+                               __i915_request_submit(rq);
+                               i915_request_put(rq);
+                       }
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
 
        /* On-hold requests will be flushed to timeline upon their release */
        list_for_each_entry(rq, &engine->active.hold, sched.link)
-               i915_request_mark_eio(rq);
+               i915_request_put(i915_request_mark_eio(rq));
 
        /* Cancel all attached virtual engines */
        while ((rb = rb_first_cached(&execlists->virtual))) {
                spin_lock(&ve->base.active.lock);
                rq = fetch_and_zero(&ve->request);
                if (rq) {
-                       i915_request_mark_eio(rq);
-
-                       rq->engine = engine;
-                       __i915_request_submit(rq);
+                       if (i915_request_mark_eio(rq)) {
+                               rq->engine = engine;
+                               __i915_request_submit(rq);
+                               i915_request_put(rq);
+                       }
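+                       /* and drop the reference we inherited from ve->request */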
                        i915_request_put(rq);
 
                        ve->base.execlists.queue_priority_hint = INT_MIN;
 
 
 static void nop_submit_request(struct i915_request *request)
 {
-       struct intel_engine_cs *engine = request->engine;
-       unsigned long flags;
-
        RQ_TRACE(request, "-EIO\n");
-       i915_request_set_error_once(request, -EIO);
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-       __i915_request_submit(request);
-       i915_request_mark_complete(request);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
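+       /* mark_eio returns a held reference, or NULL if already complete */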
+       request = i915_request_mark_eio(request);
+       if (request) {
+               i915_request_submit(request);
+               intel_engine_signal_breadcrumbs(request->engine);
 
-       intel_engine_signal_breadcrumbs(engine);
+               i915_request_put(request);
+       }
 }
 
 static void __intel_gt_set_wedged(struct intel_gt *gt)
 
 
        /* Mark all submitted requests as skipped. */
        list_for_each_entry(request, &engine->active.requests, sched.link)
-               i915_request_mark_eio(request);
+               i915_request_put(i915_request_mark_eio(request));
        intel_engine_signal_breadcrumbs(engine);
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
 
        /* Mark all submitted requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link)
-               i915_request_mark_eio(rq);
+               i915_request_put(i915_request_mark_eio(rq));
        intel_engine_signal_breadcrumbs(engine);
 
        /* Cancel and submit all pending requests. */
        list_for_each_entry(rq, &mock->hw_queue, mock.link) {
-               i915_request_mark_eio(rq);
-               __i915_request_submit(rq);
+               if (i915_request_mark_eio(rq)) {
+                       __i915_request_submit(rq);
+                       i915_request_put(rq);
+               }
        }
        INIT_LIST_HEAD(&mock->hw_queue);
 
 
        } while (!try_cmpxchg(&rq->fence.error, &old, error));
 }
 
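+/*
+ * Mark an incomplete request as cancelled with -EIO.
+ *
+ * Returns a held reference to the request if it was cancelled here, which
+ * the caller must release with i915_request_put(), or NULL if the request
+ * had already completed.
+ */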
-void i915_request_mark_eio(struct i915_request *rq)
+struct i915_request *i915_request_mark_eio(struct i915_request *rq)
 {
        if (__i915_request_is_complete(rq))
-               return;
+               return NULL;
 
        GEM_BUG_ON(i915_request_signaled(rq));
 
+       /* As soon as the request is completed, it may be retired */
+       rq = i915_request_get(rq);
+
        i915_request_set_error_once(rq, -EIO);
        i915_request_mark_complete(rq);
+
+       return rq;
 }
 
 bool __i915_request_submit(struct i915_request *request)
 
 
 void __i915_request_skip(struct i915_request *rq);
 void i915_request_set_error_once(struct i915_request *rq, int error);
-void i915_request_mark_eio(struct i915_request *rq);
+struct i915_request *i915_request_mark_eio(struct i915_request *rq);
 
 struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,
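
For reference, the caller contract introduced above can be sketched as
below. This is an illustrative fragment, not part of the patch: the
cancel_one() helper is hypothetical, and it assumes the caller holds the
engine lock that __i915_request_submit() expects (the call sites above run
under engine->active.lock). The i915_request_put(i915_request_mark_eio(rq))
shorthand used for already-submitted requests additionally relies on
i915_request_put() accepting the NULL returned for a completed request.

static void cancel_one(struct i915_request *rq)
{
        /* Returns a held reference iff the request was still incomplete */
        rq = i915_request_mark_eio(rq);
        if (rq) {
                /* Flush the now-completed request to its timeline */
                __i915_request_submit(rq);

                /* Drop the reference taken by i915_request_mark_eio() */
                i915_request_put(rq);
        }
}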