return false;
 
        /* Caller disables interrupts */
-       spin_lock(&engine->gt->irq_lock);
+       spin_lock(engine->gt->irq_lock);
        engine->irq_enable(engine);
-       spin_unlock(&engine->gt->irq_lock);
+       spin_unlock(engine->gt->irq_lock);
 
        return true;
 }
                return;
 
        /* Caller disables interrupts */
-       spin_lock(&engine->gt->irq_lock);
+       spin_lock(engine->gt->irq_lock);
        engine->irq_disable(engine);
-       spin_unlock(&engine->gt->irq_lock);
+       spin_unlock(engine->gt->irq_lock);
 }
 
 void intel_engines_reset_default_submission(struct intel_gt *gt)
 
 
 void intel_gt_common_init_early(struct intel_gt *gt)
 {
-       spin_lock_init(&gt->irq_lock);
+       spin_lock_init(gt->irq_lock);
 
        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);
 }
 
 /* Preliminary initialization of Tile 0 */
-void intel_root_gt_init_early(struct drm_i915_private *i915)
+int intel_root_gt_init_early(struct drm_i915_private *i915)
 {
        struct intel_gt *gt = to_gt(i915);
 
        gt->i915 = i915;
        gt->uncore = &i915->uncore;
+       gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+       if (!gt->irq_lock)
+               return -ENOMEM;
 
        intel_gt_common_init_early(gt);
+
+       return 0;
 }
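With this change the root GT's irq_lock lives in drm-managed memory: drmm_kzalloc() ties the allocation's lifetime to the drm_device, so the lock is freed automatically on device release and the only failure the caller has to handle is -ENOMEM. A minimal sketch of that pattern (standalone illustration, not part of the patch):

        spinlock_t *lock;

        /* lifetime is bound to the drm_device; freed on its release */
        lock = drmm_kzalloc(&i915->drm, sizeof(*lock), GFP_KERNEL);
        if (!lock)
                return -ENOMEM; /* no matching kfree() required */
        spin_lock_init(lock);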
 
 static int intel_gt_probe_lmem(struct intel_gt *gt)
 
        if (!gt_is_root(gt)) {
                struct intel_uncore *uncore;
+               spinlock_t *irq_lock;
 
                uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
                if (!uncore)
                        return -ENOMEM;
 
+               irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+               if (!irq_lock)
+                       return -ENOMEM;
+
                gt->uncore = uncore;
+               gt->irq_lock = irq_lock;
 
                intel_gt_common_init_early(gt);
        }
 
 }
 
 void intel_gt_common_init_early(struct intel_gt *gt);
-void intel_root_gt_init_early(struct drm_i915_private *i915);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
 int intel_gt_assign_ggtt(struct intel_gt *gt);
 int intel_gt_init_mmio(struct intel_gt *gt);
 int __must_check intel_gt_init_hw(struct intel_gt *gt);
 
        u32 timeout_ts;
        u32 ident;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
 
        unsigned long intr_dw;
        unsigned int bit;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
 
 {
        unsigned int bank;
 
-       spin_lock(&gt->irq_lock);
+       spin_lock(gt->irq_lock);
 
        for (bank = 0; bank < 2; bank++) {
                if (master_ctl & GEN11_GT_DW_IRQ(bank))
                        gen11_gt_bank_handler(gt, bank);
        }
 
-       spin_unlock(&gt->irq_lock);
+       spin_unlock(gt->irq_lock);
 }
 
 bool gen11_gt_reset_one_iir(struct intel_gt *gt,
        void __iomem * const regs = gt->uncore->regs;
        u32 dw;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
        if (!HAS_L3_DPF(gt->i915))
                return;
 
-       spin_lock(&gt->irq_lock);
+       spin_lock(gt->irq_lock);
        gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
-       spin_unlock(&gt->irq_lock);
+       spin_unlock(gt->irq_lock);
 
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                gt->i915->l3_parity.which_slice |= 1 << 1;
                               u32 interrupt_mask,
                               u32 enabled_irq_mask)
 {
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
 
 
 
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        new_val = gt->pm_imr;
        new_val &= ~interrupt_mask;
        struct intel_uncore *uncore = gt->uncore;
        i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        intel_uncore_write(uncore, reg, reset_mask);
        intel_uncore_write(uncore, reg, reset_mask);
 
 void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
 {
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        gt->pm_ier |= enable_mask;
        write_pm_ier(gt);
 
 void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
 {
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        gt->pm_ier &= ~disable_mask;
        gen6_gt_pm_mask_irq(gt, disable_mask);
 
        struct intel_rc6 rc6;
        struct intel_rps rps;
 
-       spinlock_t irq_lock;
+       spinlock_t *irq_lock;
        u32 gt_imr;
        u32 pm_ier;
        u32 pm_imr;
 
 
        rps_reset_ei(rps);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_enable_irq(gt, rps->pm_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
 {
        struct intel_gt *gt = rps_to_gt(rps);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        if (GRAPHICS_VER(gt->i915) >= 11)
                gen11_rps_reset_interrupts(rps);
        else
                gen6_rps_reset_interrupts(rps);
 
        rps->pm_iir = 0;
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void rps_disable_interrupts(struct intel_rps *rps)
        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        intel_synchronize_irq(gt->i915);
 
        int new_freq, adj, min, max;
        u32 pm_iir = 0;
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
        client_boost = atomic_read(&rps->num_waiters);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        /* Make sure we didn't queue anything we're not going to process. */
        if (!pm_iir && !client_boost)
        mutex_unlock(&rps->lock);
 
 out:
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_unmask_irq(gt, rps->pm_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
        struct intel_gt *gt = rps_to_gt(rps);
        const u32 events = rps->pm_events & pm_iir;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        if (unlikely(!events))
                return;
 
        events = pm_iir & rps->pm_events;
        if (events) {
-               spin_lock(&gt->irq_lock);
+               spin_lock(gt->irq_lock);
 
                GT_TRACE(gt, "irq events:%x\n", events);
 
                rps->pm_iir |= events;
 
                schedule_work(&rps->work);
-               spin_unlock(&gt->irq_lock);
+               spin_unlock(gt->irq_lock);
        }
 
        if (GRAPHICS_VER(gt->i915) >= 8)
 
 
        uncore->gsi_offset = gsi_offset;
 
+       gt->irq_lock = to_gt(i915)->irq_lock;
        intel_gt_common_init_early(gt);
        intel_uncore_init_early(uncore, gt);
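Unlike the remote-tile path above, the media/standalone GT initialized here does not allocate its own lock: it aliases the root GT's irq_lock, so both GTs serialize their interrupt handling on a single spinlock (presumably because their interrupts are reported through shared registers). Turning irq_lock from an embedded spinlock_t into a pointer is what makes this sharing possible.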
 
 
 
        assert_rpm_wakelock_held(&gt->i915->runtime_pm);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen9_enable_guc_interrupts(struct intel_guc *guc)
 
        assert_rpm_wakelock_held(&gt->i915->runtime_pm);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
                     gt->pm_guc_events);
        gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen9_disable_guc_interrupts(struct intel_guc *guc)
 
        assert_rpm_wakelock_held(&gt->i915->runtime_pm);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
        intel_synchronize_irq(gt->i915);
 
        gen9_reset_guc_interrupts(guc);
 {
        struct intel_gt *gt = guc_to_gt(guc);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen11_enable_guc_interrupts(struct intel_guc *guc)
        struct intel_gt *gt = guc_to_gt(guc);
        u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
        intel_uncore_write(gt->uncore,
                           GEN11_GUC_SG_INTR_ENABLE, events);
        intel_uncore_write(gt->uncore,
                           GEN11_GUC_SG_INTR_MASK, ~events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen11_disable_guc_interrupts(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
        intel_synchronize_irq(gt->i915);
 
        gen11_reset_guc_interrupts(guc);
 
        __reset_guc_busyness_stats(guc);
 
        /* Flush IRQ handler */
-       spin_lock_irq(&guc_to_gt(guc)->irq_lock);
-       spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
+       spin_lock_irq(guc_to_gt(guc)->irq_lock);
+       spin_unlock_irq(guc_to_gt(guc)->irq_lock);
 
        guc_flush_submissions(guc);
        guc_flush_destroyed_contexts(guc);
 
        intel_guc_enable_interrupts(guc);
 
        /* check for CT messages received before we enabled interrupts */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_guc_ct_event_handler(&guc->ct);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        drm_dbg(&i915->drm, "GuC communication enabled\n");
 
 
 
        intel_wopcm_init_early(&dev_priv->wopcm);
 
-       intel_root_gt_init_early(dev_priv);
+       ret = intel_root_gt_init_early(dev_priv);
+       if (ret < 0)
+               goto err_rootgt;
 
        i915_drm_clients_init(&dev_priv->clients, dev_priv);
 
        i915_gem_cleanup_early(dev_priv);
        intel_gt_driver_late_release_all(dev_priv);
        i915_drm_clients_fini(&dev_priv->clients);
+err_rootgt:
        intel_region_ttm_device_fini(dev_priv);
 err_ttm:
        vlv_suspend_cleanup(dev_priv);
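The error labels unwind in reverse order of setup, so jumping to the new err_rootgt label skips the GEM/GT/clients teardown (none of which has run yet when intel_root_gt_init_early() fails) and falls through to the TTM region cleanup and everything below it.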
 
 
 out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
         * We want to get the same effect as if we received a termination
         * interrupt, so just pretend that we did.
         */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_pxp_mark_termination_in_progress(pxp);
        pxp->session_events |= PXP_TERMINATION_REQUEST;
        queue_work(system_unbound_wq, &pxp->session_work);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static bool pxp_component_bound(struct intel_pxp *pxp)
 
                return -ENODEV;
 
        /* simulate a termination interrupt */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        if (!wait_for_completion_timeout(&pxp->termination,
                                         msecs_to_jiffies(100)))
 
        if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
                return;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        if (unlikely(!iir))
                return;
 
 static inline void pxp_irq_reset(struct intel_gt *gt)
 {
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 void intel_pxp_irq_enable(struct intel_pxp *pxp)
 {
        struct intel_gt *gt = pxp_to_gt(pxp);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        if (!pxp->irq_enabled)
                WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
        __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
        pxp->irq_enabled = true;
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 void intel_pxp_irq_disable(struct intel_pxp *pxp)
         */
        GEM_WARN_ON(intel_pxp_is_active(pxp));
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        pxp->irq_enabled = false;
        __pxp_set_interrupts(gt, 0);
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
        intel_synchronize_irq(gt->i915);
 
        pxp_irq_reset(gt);
 
        intel_wakeref_t wakeref;
        u32 events = 0;
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        events = fetch_and_zero(&pxp->session_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        if (!events)
                return;