entity->priority = priority;
        entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
        entity->last_scheduled = NULL;
+       RB_CLEAR_NODE(&entity->rb_tree_node);
 
        if(num_sched_list)
                entity->rq = &sched_list[0]->sched_rq[entity->priority];
        smp_wmb();
 
        spsc_queue_pop(&entity->job_queue);
+
+       /*
+        * Update the entity's location in the min heap according to
+        * the timestamp of the next job, if any.
+        */
+       if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
+               struct drm_sched_job *next;
+
+               next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+               if (next)
+                       drm_sched_rq_update_fifo(entity, next->submit_ts);
+       }
+
        return sched_job;
 }
 
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+       sched_job->submit_ts = ktime_get();
 
        /* first job wakes up scheduler */
        if (first) {
                        DRM_ERROR("Trying to push to a killed entity\n");
                        return;
                }
+
                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);
+
+               if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+                       drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+
                drm_sched_wakeup(entity->rq->sched);
        }
 }
 
 /* Map an spsc queue node (as returned by spsc_queue_peek/pop) back to the
  * struct drm_sched_job that embeds it as @queue_node.
  */
 #define to_drm_sched_job(sched_job)            \
                container_of((sched_job), struct drm_sched_job, queue_node)
 
+/* Scheduling policy applied to every run queue; defaults to round robin. */
+int drm_sched_policy = DRM_SCHED_POLICY_RR;
+
+/**
+ * DOC: sched_policy (int)
+ * Used to override the default scheduling policy of entities in a run queue
+ * (round robin or FIFO). Read-only after module load (permissions 0444).
+ */
+MODULE_PARM_DESC(sched_policy, "Specify schedule policy for entities on a runqueue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin (default), " __stringify(DRM_SCHED_POLICY_FIFO) " = use FIFO.");
+module_param_named(sched_policy, drm_sched_policy, int, 0444);
+
+/*
+ * "Less than" callback for rb_add_cached(): orders entities in the run
+ * queue's rb-tree by the timestamp of their oldest pending job, so the
+ * leftmost node (rb_first_cached()) is always the longest-waiting entity.
+ */
+static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
+                                                           const struct rb_node *b)
+{
+       struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
+       struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
+
+       return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
+}
+
+/*
+ * Remove @entity from its run queue's FIFO rb-tree, if it is queued there.
+ * Caller must hold entity->rq->lock. Clearing the node after erasing makes
+ * the removal idempotent: RB_EMPTY_NODE() guards against a second erase.
+ */
+static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+{
+       struct drm_sched_rq *rq = entity->rq;
+
+       if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
+               rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
+               RB_CLEAR_NODE(&entity->rb_tree_node);
+       }
+}
+
+/**
+ * drm_sched_rq_update_fifo - re-position an entity in the FIFO rb-tree
+ * @entity: entity to (re)insert
+ * @ts: timestamp of the entity's oldest pending job
+ *
+ * Removes @entity from its run queue's rb-tree (if present), records @ts as
+ * its sort key and re-inserts it, keeping the tree ordered oldest-first.
+ */
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+{
+       /*
+        * Both locks need to be grabbed, one to protect from entity->rq change
+        * for entity from within concurrent drm_sched_entity_select_rq and the
+        * other to update the rb tree structure.
+        */
+       spin_lock(&entity->rq_lock);
+       spin_lock(&entity->rq->lock);
+
+       /* Idempotent: a no-op if the entity is not currently in the tree. */
+       drm_sched_rq_remove_fifo_locked(entity);
+
+       entity->oldest_job_waiting = ts;
+
+       rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+                     drm_sched_entity_compare_before);
+
+       spin_unlock(&entity->rq->lock);
+       spin_unlock(&entity->rq_lock);
+}
+
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
 {
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
+       rq->rb_tree_root = RB_ROOT_CACHED;
        rq->current_entity = NULL;
        rq->sched = sched;
 }
 {
        if (!list_empty(&entity->list))
                return;
+
        spin_lock(&rq->lock);
+
        atomic_inc(rq->sched->score);
        list_add_tail(&entity->list, &rq->entities);
+
        spin_unlock(&rq->lock);
 }
 
 {
        if (list_empty(&entity->list))
                return;
+
        spin_lock(&rq->lock);
+
        atomic_dec(rq->sched->score);
        list_del_init(&entity->list);
+
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
+
+       if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+               drm_sched_rq_remove_fifo_locked(entity);
+
        spin_unlock(&rq->lock);
 }
 
 /**
- * drm_sched_rq_select_entity - Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
  *
  * @rq: scheduler run queue to check.
  *
  * Try to find a ready entity, returns NULL if none found.
  */
 static struct drm_sched_entity *
-drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
 {
        struct drm_sched_entity *entity;
 
        return NULL;
 }
 
+/**
+ * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Find oldest waiting ready entity, returns NULL if none found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+{
+       struct rb_node *rb;
+
+       spin_lock(&rq->lock);
+       /* Walk entities oldest-first; the tree is keyed on oldest_job_waiting. */
+       for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
+               struct drm_sched_entity *entity;
+
+               entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
+               if (drm_sched_entity_is_ready(entity)) {
+                       rq->current_entity = entity;
+                       reinit_completion(&entity->entity_idle);
+                       break;
+               }
+       }
+       spin_unlock(&rq->lock);
+
+       /* rb is non-NULL only if the loop broke on a ready entity. */
+       return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
+}
+
 /**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
 
        /* Kernel run queue has higher priority than normal run queue*/
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
-               entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+               entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+                       drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
+                       drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
                if (entity)
                        break;
        }
 
        DRM_SCHED_PRIORITY_UNSET = -2
 };
 
+/* Used to choose between FIFO and RR job scheduling */
+extern int drm_sched_policy;
+
+#define DRM_SCHED_POLICY_RR    0
+#define DRM_SCHED_POLICY_FIFO  1
+
 /**
  * struct drm_sched_entity - A wrapper around a job queue (typically
  * attached to the DRM file_priv).
         * drm_sched_entity_fini().
         */
        struct completion               entity_idle;
+
+       /**
+        * @oldest_job_waiting:
+        *
+        * Marks earliest job waiting in SW queue
+        */
+       ktime_t                         oldest_job_waiting;
+
+       /**
+        * @rb_tree_node:
+        *
+        * The node used to insert this entity into time based priority queue
+        */
+       struct rb_node                  rb_tree_node;
+
 };
 
 /**
  * @sched: the scheduler to which this rq belongs to.
  * @entities: list of the entities to be scheduled.
  * @current_entity: the entity which is to be scheduled.
+ * @rb_tree_root: root of the time-based priority queue of entities used for FIFO scheduling
  *
  * Run queue is a set of entities scheduling command submissions for
  * one specific ring. It implements the scheduling policy that selects
        struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
+       struct rb_root_cached           rb_tree_root;
 };
 
 /**
 
        /** @last_dependency: tracks @dependencies as they signal */
        unsigned long                   last_dependency;
+
+       /**
+        * @submit_ts:
+        *
+        * When the job was pushed into the entity queue.
+        */
+       ktime_t                         submit_ts;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);
 
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
+
 int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,