 return engine->class != RENDER_CLASS;
 }
 
+static void kick_execlists(const struct i915_request *rq, int prio)
+{
+       struct intel_engine_cs *engine = rq->engine;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
+       const struct i915_request *inflight;
+
+       /*
+        * We only need to kick the tasklet once for each new high
+        * priority context added to the queue.
+        */
+       if (prio <= sched_engine->queue_priority_hint)
+               return;
+
+       rcu_read_lock();
+
+       /* Nothing currently active? We're overdue for a submission! */
+       inflight = execlists_active(&engine->execlists);
+       if (!inflight)
+               goto unlock;
+
+       /*
+        * If we are already the currently executing context, don't
+        * bother evaluating if we should preempt ourselves.
+        */
+       if (inflight->context == rq->context)
+               goto unlock;
+
+       ENGINE_TRACE(engine,
+                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+                    prio,
+                    rq->fence.context, rq->fence.seqno,
+                    inflight->fence.context, inflight->fence.seqno,
+                    inflight->sched.attr.priority);
+
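+       /*
+        * Record the bumped priority as the new hint so that later
+        * submissions at or below it take the early-exit above.
+        */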
+       sched_engine->queue_priority_hint = prio;
+
+       /*
+        * Allow preemption of low -> normal -> high, but do not let
+        * low priority tasks preempt other low priority tasks, on the
+        * assumption that latency for low priority tasks does not
+        * matter (as much as background throughput), so keep it
+        * simple (KISS).
+        */
+       if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+               tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+       rcu_read_unlock();
+}
+
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
        engine->submit_request = execlists_submit_request;
        engine->sched_engine->schedule = i915_schedule;
+       engine->sched_engine->kick_backend = kick_execlists;
        engine->execlists.tasklet.callback = execlists_submission_tasklet;
 }
 
        ve->base.request_alloc = execlists_request_alloc;
 
        ve->base.sched_engine->schedule = i915_schedule;
+       ve->base.sched_engine->kick_backend = kick_execlists;
        ve->base.submit_request = virtual_submit_request;
        ve->base.bond_execute = virtual_bond_execute;
 
 
        return locked;
 }
 
-static inline int rq_prio(const struct i915_request *rq)
-{
-       return rq->sched.attr.priority;
-}
-
-static inline bool need_preempt(int prio, int active)
-{
-       /*
-        * Allow preemption of low -> normal -> high, but we do
-        * not allow low priority tasks to preempt other low priority
-        * tasks under the impression that latency for low priority
-        * tasks does not matter (as much as background throughput),
-        * so kiss.
-        */
-       return prio >= max(I915_PRIORITY_NORMAL, active);
-}
-
-static void kick_submission(struct intel_engine_cs *engine,
-                           const struct i915_request *rq,
-                           int prio)
-{
-       const struct i915_request *inflight;
-
-       /*
-        * We only need to kick the tasklet once for the high priority
-        * new context we add into the queue.
-        */
-       if (prio <= engine->sched_engine->queue_priority_hint)
-               return;
-
-       rcu_read_lock();
-
-       /* Nothing currently active? We're overdue for a submission! */
-       inflight = execlists_active(&engine->execlists);
-       if (!inflight)
-               goto unlock;
-
-       /*
-        * If we are already the currently executing context, don't
-        * bother evaluating if we should preempt ourselves.
-        */
-       if (inflight->context == rq->context)
-               goto unlock;
-
-       ENGINE_TRACE(engine,
-                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
-                    prio,
-                    rq->fence.context, rq->fence.seqno,
-                    inflight->fence.context, inflight->fence.seqno,
-                    inflight->sched.attr.priority);
-
-       engine->sched_engine->queue_priority_hint = prio;
-       if (need_preempt(prio, rq_prio(inflight)))
-               tasklet_hi_schedule(&engine->execlists.tasklet);
-
-unlock:
-       rcu_read_unlock();
-}
-
 static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
 {
                }
 
                /* Defer (tasklet) submission until after all of our updates. */
-               kick_submission(engine, node_to_request(node), prio);
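+               /* Not every backend installs a kick hook. */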
+               if (engine->sched_engine->kick_backend)
+                       engine->sched_engine->kick_backend(node_to_request(node), prio);
        }
 
        spin_unlock(&engine->sched_engine->lock);