                intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
 {
-       clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
        if (!__dma_fence_signal(&rq->fence)) {
                i915_request_put(rq);
                return false;
        }
 
-       list_add_tail(&rq->signal_link, signals);
        return true;
 }
 
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+       node->next = head;
+       return node;
+}
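
For anyone wondering why slist_add() is a bare pointer store rather than another llist_add(): the chain it builds is local to signal_irq_work(), already detached from the shared b->signaled_requests by llist_del_all(), so only this CPU can touch it and no atomics are needed. A standalone userspace sketch of that single-owner chaining (struct node, slist_add() and the walk below are illustrative stand-ins, not driver code):

#include <stdio.h>

/* Toy stand-in for struct llist_node: just an intrusive next pointer. */
struct node {
        int value;
        struct node *next;
};

/* Same shape as slist_add() in the patch: push onto a private chain. */
static struct node *slist_add(struct node *n, struct node *head)
{
        n->next = head;
        return n;
}

int main(void)
{
        struct node n1 = { .value = 1 }, n2 = { .value = 2 };
        struct node *chain = NULL;

        chain = slist_add(&n1, chain);
        chain = slist_add(&n2, chain);  /* LIFO: n2 is now first */

        for (struct node *n = chain, *next; n; n = next) {
                next = n->next;         /* "safe" walk, like llist_for_each_safe() */
                printf("%d\n", n->value);
        }
        return 0;
}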
+
 static void signal_irq_work(struct irq_work *work)
 {
        struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
        const ktime_t timestamp = ktime_get();
+       struct llist_node *signal, *sn;
        struct intel_context *ce, *cn;
        struct list_head *pos, *next;
-       LIST_HEAD(signal);
+
+       signal = NULL;
+       if (unlikely(!llist_empty(&b->signaled_requests)))
+               signal = llist_del_all(&b->signaled_requests);
 
        spin_lock(&b->irq_lock);
 
        if (b->irq_armed && list_empty(&b->signalers))
                __intel_breadcrumbs_disarm_irq(b);
 
-       list_splice_init(&b->signaled_requests, &signal);
-
        list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
                GEM_BUG_ON(list_empty(&ce->signals));
 
                         * spinlock as the callback chain may end up adding
                         * more signalers to the same context or engine.
                         */
-                       __signal_request(rq, &signal);
+                       clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+                       if (__signal_request(rq))
+                               /* We own signal_node now, xfer to local list */
+                               signal = slist_add(&rq->signal_node, signal);
                }
 
                /*
 
        spin_unlock(&b->irq_lock);
 
-       list_for_each_safe(pos, next, &signal) {
+       llist_for_each_safe(signal, sn, signal) {
                struct i915_request *rq =
-                       list_entry(pos, typeof(*rq), signal_link);
+                       llist_entry(signal, typeof(*rq), signal_node);
                struct list_head cb_list;
 
                spin_lock(&rq->lock);
 
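The consumer side of the new scheme is a single atomic exchange: llist_del_all() swaps the shared head with NULL and hands the detached chain back, after which the irq worker walks it with llist_for_each_safe() without holding b->irq_lock. A rough userspace approximation with C11 atomics (lnode/lhead and the helpers are invented names, not the kernel's llist.h):

#include <stdatomic.h>
#include <stddef.h>

struct lnode {
        struct lnode *next;
};

struct lhead {
        _Atomic(struct lnode *) first;
};

/* Approximation of llist_del_all(): atomically detach the whole chain. */
static struct lnode *lhead_del_all(struct lhead *h)
{
        return atomic_exchange(&h->first, NULL);
}

/* Consumer walk, analogous to llist_for_each_safe(). */
static void consume(struct lhead *h, void (*fn)(struct lnode *))
{
        for (struct lnode *n = lhead_del_all(h), *next; n; n = next) {
                next = n->next; /* read before fn() potentially reuses the node */
                fn(n);
        }
}

Once the exchange has happened no producer can reach those nodes through the head any more, which is what lets signal_irq_work() reuse rq->signal_node to splice further completions onto the same local list.
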
        spin_lock_init(&b->irq_lock);
        INIT_LIST_HEAD(&b->signalers);
-       INIT_LIST_HEAD(&b->signaled_requests);
+       init_llist_head(&b->signaled_requests);
 
        init_irq_work(&b->irq_work, signal_irq_work);
 
         * its signal completion.
         */
        if (__request_completed(rq)) {
-               if (__signal_request(rq, &b->signaled_requests))
+               if (__signal_request(rq) &&
+                   llist_add(&rq->signal_node, &b->signaled_requests))
                        irq_work_queue(&b->irq_work);
                return;
        }
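
One subtlety worth calling out in this last hunk: llist_add() returns true only when the list was empty beforehand, so irq_work_queue() is now kicked once per empty->non-empty transition rather than once per completed request. A userspace sketch of a push with those return semantics (again with invented lnode/lhead types rather than the kernel's llist.h):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct lnode {
        struct lnode *next;
};

struct lhead {
        _Atomic(struct lnode *) first;
};

/*
 * Approximation of llist_add(): lock-free push that reports whether the
 * list was empty beforehand, so the caller can queue the worker only on
 * the empty->non-empty transition.
 */
static bool lhead_add(struct lnode *n, struct lhead *h)
{
        struct lnode *first = atomic_load(&h->first);

        do {
                n->next = first;
        } while (!atomic_compare_exchange_weak(&h->first, &first, n));

        return first == NULL;
}

The caller pattern then mirrors the hunk above: only the producer that observes the empty->non-empty transition schedules the worker, and later producers rely on the already-pending irq work to flush their nodes as well.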