                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
 
        dma_fence_put(entity->last_scheduled);
+
        entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
 
+       /*
+        * If the queue is empty we allow drm_sched_entity_select_rq() to
+        * locklessly access ->last_scheduled. This only works if we set the
+        * pointer before we dequeue and if we use a write barrier here.
+        */
+       smp_wmb();
+
        spsc_queue_pop(&entity->job_queue);
        return sched_job;
 }
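
For reference, the smp_wmb()/smp_rmb() pairing the comments describe can be shown in isolation. The sketch below is purely illustrative and not part of the patch: the struct and function names are made up, and it only demonstrates that the pointer store must be ordered before the "queue empty" store on one side, and the "queue empty" load before the pointer load on the other.

#include <linux/compiler.h>
#include <asm/barrier.h>

struct example {
        void *last;     /* pointer published by the producer     */
        int count;      /* stands in for the spsc queue count    */
};

/* producer side, mirroring drm_sched_entity_pop_job() */
static void example_publish(struct example *e, void *ptr)
{
        e->last = ptr;                  /* 1) set the pointer first       */
        smp_wmb();                      /* 2) order it before the pop     */
        WRITE_ONCE(e->count, 0);        /* 3) queue now looks empty       */
}

/* consumer side, mirroring drm_sched_entity_select_rq() */
static void *example_read_last(struct example *e)
{
        if (READ_ONCE(e->count))        /* 1) only proceed when empty     */
                return NULL;
        smp_rmb();                      /* 2) pairs with the smp_wmb()    */
        return e->last;                 /* 3) pointer read is now ordered */
}

Note that the real scheduler code publishes via spsc_queue_pop() rather than a plain count store; the WRITE_ONCE() above merely stands in for that publishing step.
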
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;
 
-       if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
+       /* single possible engine and already selected */
+       if (!entity->sched_list)
+               return;
+
+       /* queue non-empty, stay on the same engine */
+       if (spsc_queue_count(&entity->job_queue))
                return;
 
-       fence = READ_ONCE(entity->last_scheduled);
+       /*
+        * Only when the queue is empty are we guaranteed that the scheduler
+        * thread cannot change ->last_scheduled. To enforce ordering we need
+        * a read barrier here. See drm_sched_entity_pop_job() for the other
+        * side.
+        */
+       smp_rmb();
+
+       fence = entity->last_scheduled;
+
+       /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
                return;