* low latency and no jitter] the chance to naturally
                         * complete before being preempted.
                         */
-                       attr.priority = I915_PRIORITY_MASK;
+                       attr.priority = 0;
                        if (rq->sched.attr.priority >= attr.priority)
                                attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
                        if (rq->sched.attr.priority >= attr.priority)
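
Aside, not part of the patch: with the internal priority bits gone, the heartbeat escalation above starts from 0 rather than I915_PRIORITY_MASK, and the OR of I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT) collapses to a plain assignment once the shift is 0 and the starting value is 0. A minimal user-space sketch of the two-step escalation, with stand-in values for the heartbeat and barrier levels (the real ones live in i915_priolist_types.h) and a hypothetical helper name:

#include <assert.h>

#define PRIO_HEARTBEAT  0x7ffffffd      /* stand-in for I915_PRIORITY_HEARTBEAT */
#define PRIO_BARRIER    0x7ffffffe      /* stand-in for I915_PRIORITY_BARRIER */

static int next_heartbeat_prio(int rq_prio)
{
        int prio = 0;                   /* was I915_PRIORITY_MASK before this patch */

        if (rq_prio >= prio)
                prio = PRIO_HEARTBEAT;  /* first bump: heartbeat level */
        if (rq_prio >= prio)
                prio = PRIO_BARRIER;    /* already at heartbeat: force completion */

        return prio;
}

int main(void)
{
        assert(next_heartbeat_prio(-1) == 0);
        assert(next_heartbeat_prio(0) == PRIO_HEARTBEAT);
        assert(next_heartbeat_prio(PRIO_HEARTBEAT) == PRIO_BARRIER);
        return 0;
}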
 
 
 static int queue_prio(const struct intel_engine_execlists *execlists)
 {
-       struct i915_priolist *p;
        struct rb_node *rb;
 
        rb = rb_first_cached(&execlists->queue);
        if (!rb)
                return INT_MIN;
 
-       /*
-        * As the priolist[] are inverted, with the highest priority in [0],
-        * we have to flip the index value to become priority.
-        */
-       p = to_priolist(rb);
-       if (!I915_USER_PRIORITY_SHIFT)
-               return p->priority;
-
-       return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+       return to_priolist(rb)->priority;
 }
 
 static int virtual_prio(const struct intel_engine_execlists *el)
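
The queue_prio() change is behaviour-preserving: with a single bucket, p->used could only ever be BIT(0), so the deleted fallback expression reduces to p->priority (and the !I915_USER_PRIORITY_SHIFT early return already took that path). A throwaway user-space check of that identity, assuming the shift of 0 that this patch relies on:

#include <assert.h>
#include <strings.h>    /* ffs() */

#define I915_USER_PRIORITY_SHIFT 0

static int old_queue_prio(int priority, unsigned long used)
{
        /* the expression removed from queue_prio() above */
        return ((priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(used);
}

int main(void)
{
        /* with one bucket, 'used' can only ever be BIT(0) */
        for (int prio = 0; prio <= 1023; prio++)
                assert(old_queue_prio(prio, 1) == prio);
        return 0;
}
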
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
-               int i;
 
-               priolist_for_each_request_consume(rq, rn, p, i) {
+               priolist_for_each_request_consume(rq, rn, p) {
                        bool merge = true;
 
                        /*
        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
-               int i;
 
-               priolist_for_each_request_consume(rq, rn, p, i) {
+               priolist_for_each_request_consume(rq, rn, p) {
                        i915_request_mark_eio(rq);
                        __i915_request_submit(rq);
                }
 
 static struct list_head *virtual_queue(struct virtual_engine *ve)
 {
-       return &ve->base.execlists.default_priolist.requests[0];
+       return &ve->base.execlists.default_priolist.requests;
 }
 
 static void rcu_virtual_context_destroy(struct work_struct *wrk)
        count = 0;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-               int i;
 
-               priolist_for_each_request(rq, p, i) {
+               priolist_for_each_request(rq, p) {
                        if (count++ < max - 1)
                                show_request(m, rq, "\t\t", 0);
                        else
 
 
        intel_ring_advance(rq, cs);
 
-       rq->sched.attr.priority = I915_PRIORITY_MASK;
        err = 0;
 err:
        i915_request_get(rq);
 
 
        intel_ring_advance(rq, cs);
 
-       rq->sched.attr.priority = I915_PRIORITY_MASK;
        err = 0;
 err:
        i915_request_get(rq);
 
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
-               int i;
 
-               priolist_for_each_request_consume(rq, rn, p, i) {
+               priolist_for_each_request_consume(rq, rn, p) {
                        if (last && rq->context != last->context) {
                                if (port == last_port)
                                        goto done;
        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
-               int i;
 
-               priolist_for_each_request_consume(rq, rn, p, i) {
+               priolist_for_each_request_consume(rq, rn, p) {
                        list_del_init(&rq->sched.link);
                        __i915_request_submit(rq);
                        dma_fence_set_error(&rq->fence, -EIO);
 
 #define I915_USER_PRIORITY_SHIFT 0
 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
 
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
 /* Smallest priority value that cannot be bumped. */
-#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
+#define I915_PRIORITY_INVALID (INT_MIN)
 
 /*
  * Requests containing performance queries must not be preempted by
 #define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
 
 struct i915_priolist {
-       struct list_head requests[I915_PRIORITY_COUNT];
+       struct list_head requests;
        struct rb_node node;
-       unsigned long used;
        int priority;
 };
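
For reference, the node this leaves behind is just an rbtree entry keyed on the effective priority plus one FIFO list, so requests of equal priority keep their submission order; the annotations here are editorial, not from the header:

struct i915_priolist {
        struct list_head requests;      /* FIFO of requests at this priority */
        struct rb_node node;            /* entry in execlists->queue, sorted by ->priority */
        int priority;                   /* plain scheduler priority, no internal bits */
};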
 
 
 static void assert_priolists(struct intel_engine_execlists * const execlists)
 {
        struct rb_node *rb;
-       long last_prio, i;
+       long last_prio;
 
        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;
 
                GEM_BUG_ON(p->priority > last_prio);
                last_prio = p->priority;
-
-               GEM_BUG_ON(!p->used);
-               for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
-                       if (list_empty(&p->requests[i]))
-                               continue;
-
-                       GEM_BUG_ON(!(p->used & BIT(i)));
-               }
        }
 }
 
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
-       int idx, i;
 
        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);
 
-       /* buckets sorted from highest [in slot 0] to lowest priority */
-       idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;
                        parent = &rb->rb_right;
                        first = false;
                } else {
-                       goto out;
+                       return &p->requests;
                }
        }
 
        }
 
        p->priority = prio;
-       for (i = 0; i < ARRAY_SIZE(p->requests); i++)
-               INIT_LIST_HEAD(&p->requests[i]);
+       INIT_LIST_HEAD(&p->requests);
+
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
-       p->used = 0;
 
-out:
-       p->used |= BIT(idx);
-       return &p->requests[idx];
+       return &p->requests;
 }
 
 void __i915_priolist_free(struct i915_priolist *p)
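
With the bucket index gone, i915_sched_lookup_priolist() hands back a single list head per priority level; the execlists backend queues into it along the lines of the sketch below (paraphrasing queue_request() from intel_lrc.c of this era from memory, so treat the details as illustrative rather than authoritative):

static void queue_request(struct intel_engine_cs *engine,
                          struct i915_request *rq)
{
        GEM_BUG_ON(!list_empty(&rq->sched.link));
        list_add_tail(&rq->sched.link,
                      i915_sched_lookup_priolist(engine, rq_prio(rq)));
        set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
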
        spin_unlock_irq(&schedule_lock);
 }
 
-static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
-{
-       struct i915_sched_attr attr = node->attr;
-
-       if (attr.priority & bump)
-               return;
-
-       attr.priority |= bump;
-       __i915_schedule(node, &attr);
-}
-
-void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
-{
-       unsigned long flags;
-
-       GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
-       if (READ_ONCE(rq->sched.attr.priority) & bump)
-               return;
-
-       spin_lock_irqsave(&schedule_lock, flags);
-       __bump_priority(&rq->sched, bump);
-       spin_unlock_irqrestore(&schedule_lock, flags);
-}
-
 void i915_sched_node_init(struct i915_sched_node *node)
 {
        INIT_LIST_HEAD(&node->signalers_list);
        if (!global.slab_dependencies)
                return -ENOMEM;
 
-       global.slab_priorities = KMEM_CACHE(i915_priolist,
-                                           SLAB_HWCACHE_ALIGN);
+       global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
        if (!global.slab_priorities)
                goto err_priorities;
 
 
 
 struct drm_printer;
 
-#define priolist_for_each_request(it, plist, idx) \
-       for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
-               list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+#define priolist_for_each_request(it, plist) \
+       list_for_each_entry(it, &(plist)->requests, sched.link)
 
-#define priolist_for_each_request_consume(it, n, plist, idx) \
-       for (; \
-            (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
-            (plist)->used &= ~BIT(idx)) \
-               list_for_each_entry_safe(it, n, \
-                                        &(plist)->requests[idx], \
-                                        sched.link)
+#define priolist_for_each_request_consume(it, n, plist) \
+       list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
 
 void i915_sched_node_init(struct i915_sched_node *node);
 void i915_sched_node_reinit(struct i915_sched_node *node);
 void i915_schedule(struct i915_request *request,
                   const struct i915_sched_attr *attr);
 
-void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
-
 struct list_head *
 i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);