void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
 
-static inline bool __execlists_need_preempt(int prio, int last)
-{
-       /*
-        * Allow preemption of low -> normal -> high, but we do
-        * not allow low priority tasks to preempt other low priority
-        * tasks under the impression that latency for low priority
-        * tasks does not matter (as much as background throughput),
-        * so kiss.
-        *
-        * More naturally we would write
-        *      prio >= max(0, last);
-        * except that we wish to prevent triggering preemption at the same
-        * priority level: the task that is running should remain running
-        * to preserve FIFO ordering of dependencies.
-        */
-       return prio > max(I915_PRIORITY_NORMAL - 1, last);
-}
-
 static inline void
 execlists_set_active(struct intel_engine_execlists *execlists,
                     unsigned int bit)
 
         * ourselves, ignore the request.
         */
        last_prio = effective_prio(rq);
-       if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
-                                     last_prio))
+       if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
+                                        last_prio))
                return false;
 
        /*
 
        GEM_BUG_ON(i915_request_completed(rq));
 
        i915_sw_fence_init(&rq->submit, dummy_notify);
-       i915_sw_fence_commit(&rq->submit);
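+       /* Mark the dummy as active; its submit fence is only committed on free */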
+       set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 
        return rq;
 }
 
 static void dummy_request_free(struct i915_request *dummy)
 {
+       /* We have to fake the CS interrupt to kick the next request */
+       i915_sw_fence_commit(&dummy->submit);
+
        i915_request_mark_complete(dummy);
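+       /* ... and signal the dummy's fence to release anyone waiting on it */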
+       dma_fence_signal(&dummy->fence);
+
        i915_sched_node_fini(&dummy->sched);
        i915_sw_fence_fini(&dummy->submit);
 
 
        if (flags & I915_WAIT_PRIORITY) {
                if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
                        gen6_rps_boost(rq);
-               local_bh_disable(); /* suspend tasklets for reprioritisation */
                i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
-               local_bh_enable(); /* kick tasklets en masse */
        }
 
        wait.tsk = current;
 
        return engine;
 }
 
-static bool inflight(const struct i915_request *rq,
-                    const struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
 {
-       const struct i915_request *active;
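+       /* Include __NO_PREEMPTION so equal base priorities never preempt */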
+       return rq->sched.attr.priority | __NO_PREEMPTION;
+}
+
+static void kick_submission(struct intel_engine_cs *engine, int prio)
+{
+       const struct i915_request *inflight =
+               port_request(engine->execlists.port);
 
-       if (!i915_request_is_active(rq))
-               return false;
+       /*
+        * If we are already the currently executing context, don't
+        * bother evaluating if we should preempt ourselves, or if
+        * we expect nothing to change as a result of running the
+        * tasklet, i.e. we have not changed the priority queue
+        * sufficiently to oust the running context.
+        */
+       if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+               return;
 
-       active = port_request(engine->execlists.port);
-       return active->hw_context == rq->hw_context;
+       tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
 static void __i915_schedule(struct i915_request *rq,
 
                engine->execlists.queue_priority_hint = prio;
 
-               /*
-                * If we are already the currently executing context, don't
-                * bother evaluating if we should preempt ourselves.
-                */
-               if (inflight(node_to_request(node), engine))
-                       continue;
-
                /* Defer (tasklet) submission until after all of our updates. */
-               tasklet_hi_schedule(&engine->execlists.tasklet);
+               kick_submission(engine, prio);
        }
 
        spin_unlock(&engine->timeline.lock);
 
                __i915_priolist_free(p);
 }
 
+static inline bool i915_scheduler_need_preempt(int prio, int active)
+{
+       /*
+        * Allow preemption of low -> normal -> high, but we do
+        * not allow low priority tasks to preempt other low priority
+        * tasks under the impression that latency for low priority
+        * tasks does not matter (as much as background throughput),
+        * so kiss.
+        *
+        * More naturally we would write
+        *      prio >= max(0, active);
+        * except that we wish to prevent triggering preemption at the same
+        * priority level: the task that is running should remain running
+        * to preserve FIFO ordering of dependencies.
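+        *
+        * For example, a request at I915_PRIORITY_NORMAL may preempt one
+        * running below NORMAL, but never another also running at NORMAL.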
+        */
+       return prio > max(I915_PRIORITY_NORMAL - 1, active);
+}
+
 #endif /* _I915_SCHEDULER_H_ */
 
                                &engine->i915->guc.preempt_work[engine->id];
                        int prio = execlists->queue_priority_hint;
 
-                       if (__execlists_need_preempt(prio, port_prio(port))) {
+                       if (i915_scheduler_need_preempt(prio,
+                                                       port_prio(port))) {
                                execlists_set_active(execlists,
                                                     EXECLISTS_ACTIVE_PREEMPT);
                                queue_work(engine->i915->guc.preempt_wq,