intel_engine_set_hwsp_writemask(engine, ~0u);
 }
 
+static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
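+       /* No backend handler has been installed yet, so no interrupt should arrive. */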
+       GEM_DEBUG_WARN_ON(iir);
+}
+
 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 {
        const struct engine_info *info = &intel_engines[id];
        engine->hw_id = info->hw_id;
        engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
 
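+       /* Start with a nop handler; the submission backend installs its own during setup. */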
+       engine->irq_handler = nop_irq_handler;
+
        engine->class = info->class;
        engine->instance = info->instance;
        __sprint_engine_name(engine);
 
        u32             irq_enable_mask; /* bitmask to enable ring interrupt */
        void            (*irq_enable)(struct intel_engine_cs *engine);
        void            (*irq_disable)(struct intel_engine_cs *engine);
+       void            (*irq_handler)(struct intel_engine_cs *engine, u16 iir);
 
        void            (*sanitize)(struct intel_engine_cs *engine);
        int             (*resume)(struct intel_engine_cs *engine);
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
 #define I915_ENGINE_HAS_TIMESLICES   BIT(4)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
-#define I915_ENGINE_IS_VIRTUAL       BIT(6)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
+#define I915_ENGINE_IS_VIRTUAL       BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
        unsigned int flags;
 
        /*
        return engine->flags & I915_ENGINE_HAS_TIMESLICES;
 }
 
-static inline bool
-intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
-{
-       return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
 static inline bool
 intel_engine_is_virtual(const struct intel_engine_cs *engine)
 {
 
 #include "intel_engine_stats.h"
 #include "intel_execlists_submission.h"
 #include "intel_gt.h"
+#include "intel_gt_irq.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_lrc.h"
        rcu_read_unlock();
 }
 
+static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+       bool tasklet = false;
+
+       if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+               u32 eir;
+
+               /* Upper 16b are the enabling mask, rsvd for internal errors */
+               eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
+               ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+               /* Disable the error interrupt until after the reset */
+               if (likely(eir)) {
+                       ENGINE_WRITE(engine, RING_EMR, ~0u);
+                       ENGINE_WRITE(engine, RING_EIR, eir);
+                       WRITE_ONCE(engine->execlists.error_interrupt, eir);
+                       tasklet = true;
+               }
+       }
+
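+       /*
+        * The HW signalled a semaphore wait: note the currently active
+        * context so the tasklet can consider yielding its timeslice.
+        */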
+       if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+               WRITE_ONCE(engine->execlists.yield,
+                          ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+               ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+                            engine->execlists.yield);
+               if (del_timer(&engine->execlists.timer))
+                       tasklet = true;
+       }
+
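+       /* A context-switch (CSB) event always needs the tasklet to process it. */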
+       if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+               tasklet = true;
+
+       if (iir & GT_RENDER_USER_INTERRUPT)
+               intel_engine_signal_breadcrumbs(engine);
+
+       if (tasklet)
+               tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
 static void __execlists_kick(struct intel_engine_execlists *execlists)
 {
        /* Kick the tasklet for some interrupt coalescing and reset handling */
                 * until a more refined solution exists.
                 */
        }
+       intel_engine_set_irq_handler(engine, execlists_irq_handler);
 
        engine->flags |= I915_ENGINE_SUPPORTS_STATS;
        if (!intel_vgpu_active(engine->i915)) {
 
                intel_guc_to_host_event_handler(guc);
 }
 
-static void
-cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
-       bool tasklet = false;
-
-       if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
-               u32 eir;
-
-               /* Upper 16b are the enabling mask, rsvd for internal errors */
-               eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
-               ENGINE_TRACE(engine, "CS error: %x\n", eir);
-
-               /* Disable the error interrupt until after the reset */
-               if (likely(eir)) {
-                       ENGINE_WRITE(engine, RING_EMR, ~0u);
-                       ENGINE_WRITE(engine, RING_EIR, eir);
-                       WRITE_ONCE(engine->execlists.error_interrupt, eir);
-                       tasklet = true;
-               }
-       }
-
-       if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
-               WRITE_ONCE(engine->execlists.yield,
-                          ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
-               ENGINE_TRACE(engine, "semaphore yield: %08x\n",
-                            engine->execlists.yield);
-               if (del_timer(&engine->execlists.timer))
-                       tasklet = true;
-       }
-
-       if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
-               tasklet = true;
-
-       if (iir & GT_RENDER_USER_INTERRUPT) {
-               intel_engine_signal_breadcrumbs(engine);
-               tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
-       }
-
-       if (tasklet)
-               tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
 static u32
 gen11_gt_engine_identity(struct intel_gt *gt,
                         const unsigned int bank, const unsigned int bit)
                engine = NULL;
 
        if (likely(engine))
-               return cs_irq_handler(engine, iir);
+               return intel_engine_cs_irq(engine, iir);
 
        WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
                  class, instance);
 void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
 {
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
-               intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+               intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+                                   gt_iir);
+
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
-               intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+               intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+                                   gt_iir);
 }
 
 static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
 void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
 {
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
-               intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+               intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+                                   gt_iir);
+
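+       /* GT_BSD_USER_INTERRUPT is bit 12; shift so the vcs handler sees it at bit 0. */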
        if (gt_iir & GT_BSD_USER_INTERRUPT)
-               intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+               intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+                                   gt_iir >> 12);
+
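+       /* GT_BLT_USER_INTERRUPT is bit 22; shift likewise for the copy engine. */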
        if (gt_iir & GT_BLT_USER_INTERRUPT)
-               intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
+               intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+                                   gt_iir >> 22);
 
        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(0));
                if (likely(iir)) {
-                       cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
-                                      iir >> GEN8_RCS_IRQ_SHIFT);
-                       cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
-                                      iir >> GEN8_BCS_IRQ_SHIFT);
+                       intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+                                           iir >> GEN8_RCS_IRQ_SHIFT);
+                       intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+                                           iir >> GEN8_BCS_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(0), iir);
                }
        }
        if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(1));
                if (likely(iir)) {
-                       cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
-                                      iir >> GEN8_VCS0_IRQ_SHIFT);
-                       cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
-                                      iir >> GEN8_VCS1_IRQ_SHIFT);
+                       intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+                                           iir >> GEN8_VCS0_IRQ_SHIFT);
+                       intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
+                                           iir >> GEN8_VCS1_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(1), iir);
                }
        }
        if (master_ctl & GEN8_GT_VECS_IRQ) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(3));
                if (likely(iir)) {
-                       cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
-                                      iir >> GEN8_VECS_IRQ_SHIFT);
+                       intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+                                           iir >> GEN8_VECS_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(3), iir);
                }
        }
 
 
 #include <linux/types.h>
 
+#include "intel_engine_types.h"
+
 struct intel_gt;
 
 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
 void gen8_gt_irq_reset(struct intel_gt *gt);
 void gen8_gt_irq_postinstall(struct intel_gt *gt);
 
+static inline void intel_engine_cs_irq(struct intel_engine_cs *engine, u16 iir)
+{
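+       /* Only call into the backend when there is an event to handle. */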
+       if (iir)
+               engine->irq_handler(engine, iir);
+}
+
+static inline void
+intel_engine_set_irq_handler(struct intel_engine_cs *engine,
+                            void (*fn)(struct intel_engine_cs *engine,
+                                       u16 iir))
+{
+       /*
+        * As the interrupt may already be live while we allocate and set up
+        * the engines, err on the side of caution and apply barriers when
+        * updating the irq handler callback. This ensures that once we do
+        * use the engine, we receive interrupts only for ourselves, and do
+        * not lose any.
+        */
+       smp_store_mb(engine->irq_handler, fn);
+}
+
 #endif /* INTEL_GT_IRQ_H */
 
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_gt.h"
+#include "intel_gt_irq.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "shmem_utils.h"
        intel_timeline_put(engine->legacy.timeline);
 }
 
+static void irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+       intel_engine_signal_breadcrumbs(engine);
+}
+
 static void setup_irq(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = engine->i915;
 
+       intel_engine_set_irq_handler(engine, irq_handler);
+
        if (INTEL_GEN(i915) >= 6) {
                engine->irq_enable = gen6_irq_enable;
                engine->irq_disable = gen6_irq_disable;
 
                return;
 
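+       /* PM_VEBOX_USER_INTERRUPT is bit 10; shift so the vebox handler sees bit 0. */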
        if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-               intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
+               intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
 
        if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
                DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
 
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_lrc.h"
 #include "gt/intel_mocs.h"
        spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
+static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
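+       /* GuC submission expects only user interrupts: signal waiters and kick the tasklet. */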
+       if (iir & GT_RENDER_USER_INTERRUPT) {
+               intel_engine_signal_breadcrumbs(engine);
+               tasklet_hi_schedule(&engine->execlists.tasklet);
+       }
+}
+
 static void guc_reset_prepare(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        }
        engine->set_default_submission = guc_set_default_submission;
 
-       engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
        engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 
        /*
 static inline void guc_default_irqs(struct intel_engine_cs *engine)
 {
        engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+       intel_engine_set_irq_handler(engine, cs_irq_handler);
 }
 
 int intel_guc_submission_setup(struct intel_engine_cs *engine)
 
                intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+                       intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+                       intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+                       intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
+                                           iir);
 
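+               /* I915_BSD_USER_INTERRUPT is bit 25; shift it down for the vcs handler. */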
                if (iir & I915_BSD_USER_INTERRUPT)
-                       intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
+                       intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
+                                           iir >> 25);
 
                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);