return true;
 }
 
-/**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
-       struct drm_sched_rq *rq = NULL;
-       unsigned int min_jobs = UINT_MAX, num_jobs;
-       int i;
-
-       for (i = 0; i < entity->num_sched_list; ++i) {
-               struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
-               if (!entity->sched_list[i]->ready) {
-                       DRM_WARN("sched%s is not ready, skipping", sched->name);
-                       continue;
-               }
-
-               num_jobs = atomic_read(&sched->num_jobs);
-               if (num_jobs < min_jobs) {
-                       min_jobs = num_jobs;
-                       rq = &entity->sched_list[i]->sched_rq[entity->priority];
-               }
-       }
-
-       return rq;
-}
-
 /**
  * drm_sched_entity_flush - Flush a context entity
  *
 void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 {
        struct dma_fence *fence;
+       struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;
 
        if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
                return;
 
        spin_lock(&entity->rq_lock);
-       rq = drm_sched_entity_get_free_sched(entity);
+       sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+       rq = sched ? &sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
 
        return job;
 }
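
(Not part of the patch.) With the open-coded helper removed, drm_sched_entity_select_rq() above now delegates the load comparison to drm_sched_pick_best() and only maps the winner back to the run queue matching the entity's priority. A rough sketch of the driver-visible effect, assuming the existing call chain in which drm_sched_job_init() invokes drm_sched_entity_select_rq(); my_job, my_entity and owner are illustrative names, not from this series:

        /* Illustrative only: my_job, my_entity and owner are made-up driver objects. */
        r = drm_sched_job_init(&my_job->base, my_entity, owner);
        if (r)
                return r;
        /*
         * select_rq() ran inside job_init(): because my_entity's job_queue
         * was empty and its sched_list holds more than one scheduler, the
         * entity may now sit on the run queue of the least-loaded scheduler.
         */
        drm_sched_entity_push_job(&my_job->base, my_entity);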
 
+/**
+ * drm_sched_pick_best - Get the drm sched with the least load from a sched_list
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of
+ * the drm_gpu_schedulers are ready.
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+                    unsigned int num_sched_list)
+{
+       struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+       int i;
+       unsigned int min_jobs = UINT_MAX, num_jobs;
+
+       for (i = 0; i < num_sched_list; ++i) {
+               sched = sched_list[i];
+
+               if (!sched->ready) {
+                       DRM_WARN("scheduler %s is not ready, skipping",
+                                sched->name);
+                       continue;
+               }
+
+               num_jobs = atomic_read(&sched->num_jobs);
+               if (num_jobs < min_jobs) {
+                       min_jobs = num_jobs;
+                       picked_sched = sched;
+               }
+       }
+
+       return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
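
(Not part of the patch.) Since the helper is now exported, a driver can also call it directly, for example to bind a newly created entity to the currently least-loaded scheduler of a given ring type. A minimal sketch under that assumption; sched_list, num_scheds and ctx are hypothetical driver-side names, not defined by this series:

        /* Hypothetical driver code: sched_list, num_scheds and ctx are illustrative. */
        struct drm_gpu_scheduler *sched;
        int r;

        sched = drm_sched_pick_best(sched_list, num_scheds);
        if (!sched)
                return -ENOENT;

        /* Pin the entity to the single least-loaded scheduler. */
        r = drm_sched_entity_init(&ctx->entity, priority, &sched, 1, NULL);
        if (r)
                return r;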
+
 /**
  * drm_sched_blocked - check if the scheduler is blocked
  *
 
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                                unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+                    unsigned int num_sched_list);
 
 #endif