 #define IVPU_MMU_REG_CR0                     0x00200020u
 #define IVPU_MMU_REG_CR0ACK                  0x00200024u
 #define IVPU_MMU_REG_CR0ACK_VAL_MASK         GENMASK(31, 0)
+#define IVPU_MMU_REG_CR0_ATSCHK_MASK         BIT(4)
+#define IVPU_MMU_REG_CR0_CMDQEN_MASK         BIT(3)
+#define IVPU_MMU_REG_CR0_EVTQEN_MASK         BIT(2)
+#define IVPU_MMU_REG_CR0_PRIQEN_MASK         BIT(1)
+#define IVPU_MMU_REG_CR0_SMMUEN_MASK         BIT(0)
+
 #define IVPU_MMU_REG_CR1                     0x00200028u
 #define IVPU_MMU_REG_CR2                     0x0020002cu
 #define IVPU_MMU_REG_IRQ_CTRL                0x00200050u
 #define IVPU_MMU_IRQ_EVTQ_EN           BIT(2)
 #define IVPU_MMU_IRQ_GERROR_EN         BIT(0)
 
-#define IVPU_MMU_CR0_ATSCHK            BIT(4)
-#define IVPU_MMU_CR0_CMDQEN            BIT(3)
-#define IVPU_MMU_CR0_EVTQEN            BIT(2)
-#define IVPU_MMU_CR0_PRIQEN            BIT(1)
-#define IVPU_MMU_CR0_SMMUEN            BIT(0)
-
 #define IVPU_MMU_CR1_TABLE_SH          GENMASK(11, 10)
 #define IVPU_MMU_CR1_TABLE_OC          GENMASK(9, 8)
 #define IVPU_MMU_CR1_TABLE_IC          GENMASK(7, 6)
        REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
        REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
 
-       val = IVPU_MMU_CR0_CMDQEN;
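+       /* Enable the queues one at a time, waiting for each CR0 write to be acknowledged */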
+       val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
        ret = ivpu_mmu_reg_write_cr0(vdev, val);
        if (ret)
                return ret;
        REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
        REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
 
-       val |= IVPU_MMU_CR0_EVTQEN;
+       val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
        ret = ivpu_mmu_reg_write_cr0(vdev, val);
        if (ret)
                return ret;
 
-       val |= IVPU_MMU_CR0_ATSCHK;
+       val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
        ret = ivpu_mmu_reg_write_cr0(vdev, val);
        if (ret)
                return ret;
        if (ret)
                return ret;
 
-       val |= IVPU_MMU_CR0_SMMUEN;
+       val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
        return ivpu_mmu_reg_write_cr0(vdev, val);
 }
 
        return evt;
 }
 
-static int ivpu_mmu_disable_events(struct ivpu_device *vdev, u32 ssid)
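+/* Write CR0 with EVTQEN toggled and wait for CR0ACK to match the new value */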
+static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
+{
+       u32 val = REGV_RD32(IVPU_MMU_REG_CR0);
+
+       if (enable)
+               val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+       else
+               val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+       REGV_WR32(IVPU_MMU_REG_CR0, val);
+
+       return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
+
+static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
+{
+       return ivpu_mmu_evtq_set(vdev, true);
+}
+
+static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
+{
+       return ivpu_mmu_evtq_set(vdev, false);
+}
+
+void ivpu_mmu_discard_events(struct ivpu_device *vdev)
+{
+       /*
+        * Disable event queue (stop MMU from updating the producer)
+        * to allow synchronization of consumer and producer indexes
+        */
+       ivpu_mmu_evtq_disable(vdev);
+
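+       /*
+        * Advance the consumer index to the producer, then re-read the
+        * producer so the WARN below can detect a concurrent update
+        */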
+       vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+       REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+       vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+
+       ivpu_mmu_evtq_enable(vdev);
+
+       drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);
+}
+
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
 {
        struct ivpu_mmu_info *mmu = vdev->mmu;
        struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
                clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
 
        ivpu_mmu_cmdq_write_cfgi_all(vdev);
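+       /* Wait until the MMU consumes the invalidation before returning */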
+       ivpu_mmu_cmdq_sync(vdev);
 
        return 0;
 }
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 {
        struct ivpu_file_priv *file_priv;
-       u32 last_ssid = -1;
        u32 *event;
        u32 ssid;
 
        ivpu_dbg(vdev, IRQ, "MMU event queue\n");
 
        while ((event = ivpu_mmu_get_event(vdev))) {
-               ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
-
-               if (ssid == last_ssid)
-                       continue;
+               ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
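+               /* Faults in the global context are fatal - dump the event and recover */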
+               if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+                       ivpu_mmu_dump_event(vdev, event);
+                       ivpu_pm_trigger_recovery(vdev, "MMU event");
+                       return;
+               }
 
-               xa_lock(&vdev->context_xa);
                file_priv = xa_load(&vdev->context_xa, ssid);
                if (file_priv) {
-                       if (file_priv->has_mmu_faults) {
-                               event = NULL;
-                       } else {
-                               ivpu_mmu_disable_events(vdev, ssid);
-                               file_priv->has_mmu_faults = true;
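+                       /* Dump only the first fault per context to avoid flooding the log */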
+                       if (!READ_ONCE(file_priv->has_mmu_faults)) {
+                               ivpu_mmu_dump_event(vdev, event);
+                               WRITE_ONCE(file_priv->has_mmu_faults, true);
                        }
                }
-               xa_unlock(&vdev->context_xa);
-
-               if (event)
-                       ivpu_mmu_dump_event(vdev, event);
-
-               if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
-                       ivpu_pm_trigger_recovery(vdev, "MMU event");
-                       return;
-               }
-               REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
        }
 
        queue_work(system_wq, &vdev->context_abort_work);