return container_of(engine, struct virtual_engine, base);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-       if (__i915_request_is_complete(rq))
-               return;
-
-       GEM_BUG_ON(i915_request_signaled(rq));
-
-       i915_request_set_error_once(rq, -EIO);
-       i915_request_mark_complete(rq);
-}
-
 static struct i915_request *
 __active_request(const struct intel_timeline * const tl,
                 struct i915_request *rq,
 
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link)
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
        intel_engine_signal_breadcrumbs(engine);
 
        /* Flush the queued requests to the timeline list (for retiring). */
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
-                       mark_eio(rq);
+                       i915_request_mark_eio(rq);
                        __i915_request_submit(rq);
                }
 
 
        /* On-hold requests will be flushed to timeline upon their release */
        list_for_each_entry(rq, &engine->active.hold, sched.link)
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
 
        /* Cancel all attached virtual engines */
        while ((rb = rb_first_cached(&execlists->virtual))) {
                spin_lock(&ve->base.active.lock);
                rq = fetch_and_zero(&ve->request);
                if (rq) {
-                       mark_eio(rq);
+                       i915_request_mark_eio(rq);
 
                        rq->engine = engine;
                        __i915_request_submit(rq);
 
        spin_lock_irqsave(&engine->active.lock, flags);
 
        /* Mark all submitted requests as skipped. */
-       list_for_each_entry(request, &engine->active.requests, sched.link) {
-               i915_request_set_error_once(request, -EIO);
-               i915_request_mark_complete(request);
-       }
+       list_for_each_entry(request, &engine->active.requests, sched.link)
+               i915_request_mark_eio(request);
        intel_engine_signal_breadcrumbs(engine);
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
        GEM_BUG_ON(stalled);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-       if (i915_request_completed(rq))
-               return;
-
-       GEM_BUG_ON(i915_request_signaled(rq));
-
-       i915_request_set_error_once(rq, -EIO);
-       i915_request_mark_complete(rq);
-}
-
 static void mock_reset_cancel(struct intel_engine_cs *engine)
 {
        struct mock_engine *mock =
 
        /* Mark all submitted requests as skipped. */
        list_for_each_entry(rq, &engine->active.requests, sched.link)
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
        intel_engine_signal_breadcrumbs(engine);
 
        /* Cancel and submit all pending requests. */
        list_for_each_entry(rq, &mock->hw_queue, mock.link) {
-               mark_eio(rq);
+               i915_request_mark_eio(rq);
                __i915_request_submit(rq);
        }
        INIT_LIST_HEAD(&mock->hw_queue);
 
        } while (!try_cmpxchg(&rq->fence.error, &old, error));
 }
 
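+/*
+ * Mark an incomplete request as failed: record -EIO as its fence error
+ * and flag it complete so that it is skipped rather than executed.
+ */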
+void i915_request_mark_eio(struct i915_request *rq)
+{
+       if (__i915_request_is_complete(rq))
+               return;
+
+       GEM_BUG_ON(i915_request_signaled(rq));
+
+       i915_request_set_error_once(rq, -EIO);
+       i915_request_mark_complete(rq);
+}
+
 bool __i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
 
 struct i915_request * __must_check
 i915_request_create(struct intel_context *ce);
 
-void i915_request_set_error_once(struct i915_request *rq, int error);
 void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
 
 struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,