        /* Check that the context wasn't destroyed before submission */
        if (likely(!intel_context_is_closed(eb->context))) {
                attr = eb->gem_context->sched;
-
-               /*
-                * Boost actual workloads past semaphores!
-                *
-                * With semaphores we spin on one engine waiting for another,
-                * simply to reduce the latency of starting our work when
-                * the signaler completes. However, if there is any other
-                * work that we could be doing on this engine instead, that
-                * is better utilisation and will reduce the overall duration
-                * of the current work. To avoid PI boosting a semaphore
-                * far in the distance past over useful work, we keep a history
-                * of any semaphore use along our dependency chain.
-                */
-               if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
-                       attr.priority |= I915_PRIORITY_NOSEMAPHORE;
        } else {
                /* Serialise with context_close via the add_to_timeline */
                i915_request_set_error_once(rq, -ENOENT);
 
        if (i915_request_has_nopreempt(rq))
                prio = I915_PRIORITY_UNPREEMPTABLE;
 
-       /*
-        * On unwinding the active request, we give it a priority bump
-        * if it has completed waiting on any semaphore. If we know that
-        * the request has already started, we can prevent an unwanted
-        * preempt-to-idle cycle by taking that into account now.
-        */
-       if (__i915_request_has_started(rq))
-               prio |= I915_PRIORITY_NOSEMAPHORE;
-
        return prio;
 }
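
The two hunks above delete the same policy from both ends of the pipeline:
requests with no semaphore anywhere in their dependency chain used to get a
one-bit priority bump (I915_PRIORITY_NOSEMAPHORE) so that plain work could
overtake requests busy-spinning on a semaphore, and a request that had
already started kept the bump on unwind to avoid a pointless
preempt-to-idle cycle. A minimal userspace model of the old encoding, with
invented names (only the arithmetic mirrors the macros this patch removes):

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the old packing: the user priority sat in the high bits and
 * bit 0 carried the "does not wait on a semaphore" hint.
 */
#define USER_PRIORITY_SHIFT	1
#define PRIORITY_NOSEMAPHORE	(1 << 0)

static int old_effective_prio(int user_prio, bool uses_semaphore)
{
	int prio = user_prio << USER_PRIORITY_SHIFT;

	if (!uses_semaphore)
		prio |= PRIORITY_NOSEMAPHORE;
	return prio;
}

int main(void)
{
	/* Equal user priority, but the non-spinning request sorted higher. */
	printf("semaphore spinner: %d\n", old_effective_prio(2, true));
	printf("plain work:        %d\n", old_effective_prio(2, false));
	return 0;
}

After this patch both requests encode to the bare user priority, and the
ordering between them is handled by the semaphore-fence deferral further
below rather than by priority inheritance.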
 
        /* Opencode i915_request_add() so we can keep the timeline locked. */
        __i915_request_commit(rq);
+       rq->sched.attr.priority = I915_PRIORITY_BARRIER;
        __i915_request_queue(rq, NULL);
 
        timeout = i915_request_wait(rq, 0, HZ / 10);
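
This hunk makes the barrier priority explicit at queue time: with no bump
left to fold into attr.priority, the opencoded add stamps the request with
I915_PRIORITY_BARRIER directly before queueing, so it still jumps ahead of
all user work. A small check of the ordering this relies on; the sentinel
values below are assumptions for illustration (the driver keeps them at
the top of the int range), only their relative order matters:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define PRIORITY_UNPREEMPTABLE	INT_MAX				/* assumed */
#define PRIORITY_BARRIER	(PRIORITY_UNPREEMPTABLE - 1)	/* assumed */
#define MAX_USER_PRIORITY	1023				/* assumed cap */

int main(void)
{
	/* A barrier request must outrank any user request ... */
	assert(PRIORITY_BARRIER > MAX_USER_PRIORITY);
	/* ... while still yielding to an unpreemptable one. */
	assert(PRIORITY_UNPREEMPTABLE > PRIORITY_BARRIER);
	puts("sentinel ordering holds");
	return 0;
}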
 
        I915_PRIORITY_DISPLAY,
 };
 
-#define I915_USER_PRIORITY_SHIFT 1
+#define I915_USER_PRIORITY_SHIFT 0
 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
 
 #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
 #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
 
-#define I915_PRIORITY_NOSEMAPHORE      ((u8)BIT(0))
-
 /* Smallest priority value that cannot be bumped. */
 #define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
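
With the NOSEMAPHORE bit gone there is nothing left to pack beside the
user priority, so the shift collapses to 0: I915_USER_PRIORITY() becomes
the identity, I915_PRIORITY_COUNT becomes 1 and the internal mask becomes
empty. The arithmetic can be checked in isolation; the macros below
mirror the post-patch definitions above:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define I915_USER_PRIORITY_SHIFT 0
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)

int main(void)
{
	assert(I915_USER_PRIORITY(2) == 2);	/* identity encoding */
	assert(I915_PRIORITY_COUNT == 1);	/* one level per user level */
	assert(I915_PRIORITY_MASK == 0);	/* no internal hint bits */

	/* I915_PRIORITY_INVALID likewise collapses to plain INT_MIN. */
	assert((INT_MIN | (int)I915_PRIORITY_MASK) == INT_MIN);

	/* Under the old shift of 1, the same user value was doubled to
	 * leave bit 0 free for I915_PRIORITY_NOSEMAPHORE. */
	assert((2 << 1) == 4);

	puts("priority macros check out");
	return 0;
}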
 
 
        }
        spin_unlock_irq(&signal->lock);
 
-       /* Copy across semaphore status as we need the same behaviour */
-       rq->sched.flags |= signal->sched.flags;
        return 0;
 }
 
        spin_unlock(&request->lock);
 
        /* We've already spun, don't charge on resubmitting. */
-       if (request->sched.semaphores && i915_request_started(request)) {
-               request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+       if (request->sched.semaphores && i915_request_started(request))
                request->sched.semaphores = 0;
-       }
 
        /*
         * We don't need to wake_up any waiters on request->execute, they
        return NOTIFY_DONE;
 }
 
-static void irq_semaphore_cb(struct irq_work *wrk)
-{
-       struct i915_request *rq =
-               container_of(wrk, typeof(*rq), semaphore_work);
-
-       i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
-       i915_request_put(rq);
-}
-
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
 
        switch (state) {
        case FENCE_COMPLETE:
-               if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
-                       i915_request_get(rq);
-                       init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
-                       irq_work_queue(&rq->semaphore_work);
-               }
                break;
 
        case FENCE_FREE:
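
With the bump gone, FENCE_COMPLETE has nothing left to do: the deleted
irq_semaphore_cb() existed only to bounce out of the fence-signal path,
where the scheduler lock must not be taken, into irq_work where the
priority bump could be applied safely. A minimal model of that deleted
round trip, with invented names (the real code used init_irq_work() and
irq_work_queue(), as seen in the removed lines):

#include <stdio.h>

struct request {
	int priority;
	void (*deferred)(struct request *rq);	/* stands in for irq_work */
};

static void bump_priority(struct request *rq)
{
	rq->priority |= 1;	/* the old one-bit NOSEMAPHORE bump */
}

static void fence_complete(struct request *rq)
{
	/* Signal context: only queue the work, touch no scheduler state. */
	rq->deferred = bump_priority;
}

static void run_deferred(struct request *rq)
{
	/* Later, from a context where locking is allowed. */
	if (rq->deferred)
		rq->deferred(rq);
	rq->deferred = NULL;
}

int main(void)
{
	struct request rq = { .priority = 2, .deferred = NULL };

	fence_complete(&rq);
	run_deferred(&rq);
	printf("priority after deferred bump: %d\n", rq.priority);
	return 0;
}
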
                    gfp_t gfp)
 {
        const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+       struct i915_sw_fence *wait = &to->submit;
 
        if (!intel_context_use_semaphores(to->context))
                goto await_fence;
                goto await_fence;
 
        to->sched.semaphores |= mask;
-       to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-       return 0;
+       wait = &to->semaphore;
 
 await_fence:
-       return i915_sw_fence_await_dma_fence(&to->submit,
+       return i915_sw_fence_await_dma_fence(wait,
                                             &from->fence, 0,
                                             I915_FENCE_GFP);
 }
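
This is the replacement mechanism. Instead of tagging the waiter with a
chain flag and repairing priorities after the fact, emit_semaphore_wait()
now chooses which of the request's two internal fences the dependency is
parked on: if the hardware semaphore can do the waiting, only the
semaphore fence tracks the signaler and submission is free to proceed.
The hunk that follows drops the now-redundant second await from the
request-await path for the same reason. A sketch of the control flow,
with invented types (only the single await_fence exit mirrors the patch):

#include <stdio.h>

struct fence {
	int waiters;
};

struct request {
	struct fence submit;	/* gates submission to the engine */
	struct fence semaphore;	/* tracks an in-flight HW semaphore */
};

static int await(struct fence *f)
{
	f->waiters++;
	return 0;
}

static int emit_semaphore_wait(struct request *to, int can_spin_on_hw)
{
	struct fence *wait = &to->submit;

	if (!can_spin_on_hw)
		goto await_fence;

	/* The engine will spin for the signaler, so only the semaphore
	 * fence needs to know; submission itself is not held back. */
	wait = &to->semaphore;

await_fence:
	return await(wait);
}

int main(void)
{
	struct request rq = { { 0 }, { 0 } };

	emit_semaphore_wait(&rq, 0);	/* fallback: blocks submission */
	emit_semaphore_wait(&rq, 1);	/* semaphore: submission stays free */
	printf("submit waiters %d, semaphore waiters %d\n",
	       rq.submit.waiters, rq.semaphore.waiters);
	return 0;
}
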
        if (ret < 0)
                return ret;
 
-       if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
-               ret = i915_sw_fence_await_dma_fence(&to->semaphore,
-                                                   &from->fence, 0,
-                                                   I915_FENCE_GFP);
-               if (ret < 0)
-                       return ret;
-       }
-
-       if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
-               to->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
-
        return 0;
 }
 
                attr = ctx->sched;
        rcu_read_unlock();
 
-       if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
-               attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
        __i915_request_queue(rq, &attr);
 
        mutex_unlock(&tl->mutex);
 
        };
        struct list_head execute_cb;
        struct i915_sw_fence semaphore;
-       struct irq_work semaphore_work;
 
        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
 
        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));
 
-       last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
+       last_prio = INT_MAX;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);
 
-               GEM_BUG_ON(p->priority >= last_prio);
+               GEM_BUG_ON(p->priority > last_prio);
                last_prio = p->priority;
 
                GEM_BUG_ON(!p->used);
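
The assert changes because the old seed no longer exists: with the
user-priority shift at 0 the full int range is in play and the fixed
sentinels sit at INT_MAX, so there is no representable "one past the
maximum" value to start from. Seeding with INT_MAX and relaxing the
comparison to ">" keeps the invariant checkable: the walk must simply be
non-increasing. In miniature:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Stand-in for the rbtree walk over the priolists. */
	const int prios[] = { INT_MAX, 1023, 0, -1023, INT_MIN };
	int last_prio = INT_MAX;
	size_t i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		/* The old ">= last_prio" check would trip on the first
		 * entry now that INT_MAX is itself attainable. */
		assert(!(prios[i] > last_prio));
		last_prio = prios[i];
	}
	puts("queue sorted high to low");
	return 0;
}
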
                dep->waiter = node;
                dep->flags = flags;
 
-               /* Keep track of whether anyone on this chain has a semaphore */
-               if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
-                   !node_started(signal))
-                       node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
                /* All set, now publish. Beware the lockless walkers. */
                list_add_rcu(&dep->signal_link, &node->signalers_list);
                list_add_rcu(&dep->wait_link, &signal->waiters_list);
 
+               /* Propagate the chains */
+               node->flags |= signal->flags;
                ret = true;
        }
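
With only I915_SCHED_HAS_EXTERNAL_CHAIN left there is no reason to
propagate flags selectively (the deleted code skipped already-started
signalers to bound the semaphore-chain history), so a plain OR folds every
signaler's flags into the waiter as each edge is added, and the flag
travels transitively down the dependency DAG. A small model of that
propagation:

#include <assert.h>
#include <stdio.h>

#define SCHED_HAS_EXTERNAL_CHAIN (1u << 0)

struct node {
	unsigned int flags;
};

static void add_dependency(struct node *waiter, const struct node *signaler)
{
	/* Propagate the chains */
	waiter->flags |= signaler->flags;
}

int main(void)
{
	struct node a = { SCHED_HAS_EXTERNAL_CHAIN };
	struct node b = { 0 };
	struct node c = { 0 };

	add_dependency(&b, &a);	/* b waits on a */
	add_dependency(&c, &b);	/* c waits on b, inherits a's flag */
	assert(c.flags & SCHED_HAS_EXTERNAL_CHAIN);
	puts("external-chain flag reached the end of the chain");
	return 0;
}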
 
 
        struct list_head link;
        struct i915_sched_attr attr;
        unsigned int flags;
-#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
-#define I915_SCHED_HAS_EXTERNAL_CHAIN  BIT(1)
+#define I915_SCHED_HAS_EXTERNAL_CHAIN  BIT(0)
        intel_engine_mask_t semaphores;
 };