        /* For killed process disable any more IBs enqueue right now */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if ((!last_user || last_user == current->group_leader) &&
-           (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+           (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
+               spin_lock(&entity->rq_lock);
+               entity->stopped = true;
                drm_sched_rq_remove_entity(entity->rq, entity);
+               spin_unlock(&entity->rq_lock);
+       }
 
        return ret;
 }
        if (first) {
                /* Add the entity to the run queue */
                spin_lock(&entity->rq_lock);
+               if (entity->stopped) {
+                       spin_unlock(&entity->rq_lock);
+
+                       DRM_ERROR("Trying to push to a killed entity\n");
+                       return;
+               }
                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);
                drm_sched_wakeup(entity->rq->sched);
 
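Taken together, the two hunks above implement a simple stop-flag handshake: the flush path marks the entity stopped and removes it from the run queue under entity->rq_lock, while the push path re-checks the flag under the same lock before re-adding the entity, so a job pushed concurrently with process teardown can no longer resurrect the entity on the run queue. Below is a minimal, self-contained sketch of that pattern; the names (struct entity, entity_flush, entity_push_job, on_rq) and the pthread mutex standing in for entity->rq_lock are illustrative only and are not the DRM scheduler API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entity {
	pthread_mutex_t	rq_lock;	/* guards run-queue membership and 'stopped' */
	bool		stopped;	/* set once the entity is being torn down */
	bool		on_rq;		/* stands in for run-queue membership */
};

/* Teardown side: mark the entity stopped and drop it from the run queue
 * under the same lock, mirroring the flush hunk above. */
static void entity_flush(struct entity *e)
{
	pthread_mutex_lock(&e->rq_lock);
	e->stopped = true;
	e->on_rq = false;		/* drm_sched_rq_remove_entity() in the patch */
	pthread_mutex_unlock(&e->rq_lock);
}

/* Producer side: re-check 'stopped' under the lock before enqueueing,
 * mirroring the check added to the push path above. */
static int entity_push_job(struct entity *e)
{
	pthread_mutex_lock(&e->rq_lock);
	if (e->stopped) {
		pthread_mutex_unlock(&e->rq_lock);
		fprintf(stderr, "Trying to push to a killed entity\n");
		return -1;
	}
	e->on_rq = true;		/* drm_sched_rq_add_entity() in the patch */
	pthread_mutex_unlock(&e->rq_lock);
	return 0;
}

int main(void)
{
	struct entity e = { .rq_lock = PTHREAD_MUTEX_INITIALIZER };

	entity_push_job(&e);		/* succeeds: entity still live */
	entity_flush(&e);		/* entity torn down */
	if (entity_push_job(&e))	/* rejected: entity already stopped */
		fprintf(stderr, "second push rejected as expected\n");
	return 0;
}

Keeping both the flag and the run-queue membership under one lock is what closes the race window: a producer that takes the lock first still enqueues and is then removed by the flush, while a producer that takes it afterwards sees stopped == true and bails out.
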
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
  * @last_user: last group leader pushing a job into the entity.
+ * @stopped: Marks the entity as removed from rq and destined for termination.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
        atomic_t                        *guilty;
        struct dma_fence                *last_scheduled;
        struct task_struct              *last_user;
+       bool                            stopped;
 };
 
 /**