         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
-       rcu_read_lock();
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-       rcu_read_unlock();
 }
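
For orientation, after the hunk above the fake-irq timer callback reads roughly as the sketch below; the callback signature is assumed from the surrounding file rather than shown in this excerpt. The explicit rcu_read_lock()/rcu_read_unlock() pair can be dropped because intel_engine_wakeup(), reworked in the final hunk of this patch, now takes the RCU read lock around its own dereference:

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

        /* Keep prodding the oldest waiter every jiffie until it has done
         * the coherent seqno check; no RCU locking is needed here as the
         * wakeup helper brackets the dereference itself.
         */
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}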
 
 static void irq_enable(struct intel_engine_cs *engine)
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
-       GEM_BUG_ON(!first && !b->irq_seqno_bh);
+       GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
 
        if (completed) {
                struct rb_node *next = rb_next(completed);
                        GEM_BUG_ON(first);
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
-                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->timeout = wait_timeout();
                b->first_wait = wait;
-               smp_store_mb(b->irq_seqno_bh, wait->tsk);
+               rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
-       GEM_BUG_ON(!b->irq_seqno_bh);
+       GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
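
The GEM_BUG_ON() conversions above follow the usual rule for __rcu fields: rcu_access_pointer() is used for pointer-value tests, which are legal anywhere (no RCU read lock, no sparse warning), while code holding b->lock that needs the pointer itself would use rcu_dereference_protected(). A minimal sketch of the two accessors, using a hypothetical assert_bottom_half() helper that is not part of this patch and assumes the caller holds b->lock:

static void assert_bottom_half(struct intel_breadcrumbs *b)
{
        struct task_struct *tsk;

        /* Value-only check: valid outside any RCU read-side section. */
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));

        /* Full dereference, with the protection coming from b->lock rather
         * than rcu_read_lock(); lockdep_is_held() documents and checks it.
         */
        tsk = rcu_dereference_protected(b->irq_seqno_bh,
                                        lockdep_is_held(&b->lock));
        GEM_BUG_ON(tsk != b->first_wait->tsk);
}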
 
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;
 
-               GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+               GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
 
                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                         */
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
-                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
-                       wake_up_process(b->irq_seqno_bh);
+                       wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
-                       WRITE_ONCE(b->irq_seqno_bh, NULL);
+                       rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
-       GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+       GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
        spin_unlock(&b->lock);
 }
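
Because every store to irq_seqno_bh above happens under b->lock, the old smp_store_mb()/WRITE_ONCE() publication can become rcu_assign_pointer(), which orders the waiter's initialisation before the pointer becomes visible to rcu_dereference() readers in the interrupt path. A minimal, self-contained sketch of that writer-side pattern, using illustrative names (struct breadcrumbs, publish_bottom_half) rather than the driver's own:

#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct breadcrumbs {
        spinlock_t lock;                        /* serialises all writers */
        struct task_struct __rcu *irq_seqno_bh; /* read from irq context */
};

static void publish_bottom_half(struct breadcrumbs *b, struct task_struct *tsk)
{
        spin_lock(&b->lock);
        /* Release semantics: everything initialising the waiter is visible
         * before a reader can observe the new pointer. Passing NULL simply
         * unpublishes the current bottom-half.
         */
        rcu_assign_pointer(b->irq_seqno_bh, tsk);
        spin_unlock(&b->lock);
}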
 
         * RCU lock, i.e. as we call wake_up_process() we must be holding the
         * rcu_read_lock().
         */
-       rcu_read_lock();
        for_each_engine(engine, i915)
                if (unlikely(intel_engine_wakeup(engine)))
                        mask |= intel_engine_flag(engine);
-       rcu_read_unlock();
 
        return mask;
 }
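
After this hunk, the kick-waiters loop relies on intel_engine_wakeup() bracketing each dereference in its own RCU read-side section, so holding rcu_read_lock() across the whole engine loop is no longer required. Roughly, with the enclosing signature assumed rather than shown in this excerpt:

unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        /* Each call to intel_engine_wakeup() re-samples the bottom-half
         * pointer under its own rcu_read_lock(), which is all that is
         * needed to keep the task valid for wake_up_process().
         */
        for_each_engine(engine, i915)
                if (unlikely(intel_engine_wakeup(engine)))
                        mask |= intel_engine_flag(engine);

        return mask;
}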
 
         * the overhead of waking that client is much preferred.
         */
        struct intel_breadcrumbs {
-               struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+               struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
                bool irq_posted;
 
                spinlock_t lock; /* protects the lists of requests */
                              struct intel_wait *wait);
 void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
 
-static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
 {
-       return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+       return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
 }
 
-static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 {
        bool wakeup = false;
-       struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+
        /* Note that for this not to dangerously chase a dangling pointer,
-        * the caller is responsible for ensure that the task remain valid for
-        * wake_up_process() i.e. that the RCU grace period cannot expire.
+        * we must hold the rcu_read_lock here.
         *
         * Also note that tsk is likely to be in !TASK_RUNNING state so an
         * early test for tsk->state != TASK_RUNNING before wake_up_process()
         * is unlikely to be beneficial.
         */
-       if (tsk)
-               wakeup = wake_up_process(tsk);
+       if (intel_engine_has_waiter(engine)) {
+               struct task_struct *tsk;
+
+               rcu_read_lock();
+               tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
+               if (tsk)
+                       wakeup = wake_up_process(tsk);
+               rcu_read_unlock();
+       }
+
        return wakeup;
 }
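
The reader side above is the crux of the change: wake_up_process() is called on a task that may be concurrently retiring its wait, and this stays safe because a task_struct is not freed until an RCU grace period has elapsed, so within the rcu_read_lock()/rcu_read_unlock() section the dereferenced task cannot disappear. A condensed sketch of the whole lifecycle, reusing the hypothetical publish_bottom_half() and struct breadcrumbs from the earlier sketch, and assuming the waiter always unpublishes itself (under b->lock) before it can exit:

/* Waiter thread: publish, sleep, then unpublish before returning. */
static void waiter(struct breadcrumbs *b)
{
        publish_bottom_half(b, current);        /* rcu_assign_pointer() */

        set_current_state(TASK_INTERRUPTIBLE);
        /* The real driver re-checks the seqno here before sleeping to
         * close the race with a wakeup; this sketch omits that check.
         */
        schedule();
        __set_current_state(TASK_RUNNING);

        publish_bottom_half(b, NULL);           /* unpublish before exit */
}

/* Interrupt/timer path: wake whoever is currently published. */
static bool kick_bottom_half(struct breadcrumbs *b)
{
        struct task_struct *tsk;
        bool woken = false;

        rcu_read_lock();
        tsk = rcu_dereference(b->irq_seqno_bh);
        if (tsk)
                woken = wake_up_process(tsk);   /* tsk valid in this section */
        rcu_read_unlock();

        return woken;
}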