 write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
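+/*
+ * RING_MI_MODE lives at a different dword offset in the logical ring
+ * context image depending on gen/engine. Return the offset of its
+ * (reg, value) pair within lrc_reg_state, or -1 if we do not know
+ * where it is for this engine.
+ */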
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(engine->i915) >= 12)
+               return 0x60;
+       else if (INTEL_GEN(engine->i915) >= 9)
+               return 0x54;
+       else if (engine->class == RENDER_CLASS)
+               return 0x58;
+       else
+               return -1;
+}
+
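+/*
+ * Sanity check the ring registers in the context image about to be
+ * submitted to the HW, repairing anything that looks wrong and warning
+ * once if the image was invalid. Only called on CONFIG_DRM_I915_DEBUG_GEM
+ * builds, see __execlists_schedule_in().
+ */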
+static void
+execlists_check_context(const struct intel_context *ce,
+                       const struct intel_engine_cs *engine)
+{
+       const struct intel_ring *ring = ce->ring;
+       u32 *regs = ce->lrc_reg_state;
+       bool valid = true;
+       int x;
+
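+       /* RING_START must match the ring's current placement in the GGTT */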
+       if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+               pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+                      engine->name,
+                      regs[CTX_RING_START],
+                      i915_ggtt_offset(ring->vma));
+               regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+               valid = false;
+       }
+
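+       /*
+        * RING_CTL must advertise the ring size and be marked valid.
+        * RING_WAIT and RING_WAIT_SEMAPHORE are status bits the HW may
+        * have left set in the saved image, so ignore them here.
+        */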
+       if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+           (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+               pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+                      engine->name,
+                      regs[CTX_RING_CTL],
+                      (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+               regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+               valid = false;
+       }
+
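+       /*
+        * RING_MI_MODE is a masked register (the high 16 bits select which
+        * of the low bits a write updates). If the image would restore
+        * STOP_RING, the engine would stall, so convert it into an explicit
+        * disable of STOP_RING instead.
+        */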
+       x = lrc_ring_mi_mode(engine);
+       if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+               pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+                      engine->name, regs[x + 1]);
+               regs[x + 1] &= ~STOP_RING;
+               regs[x + 1] |= STOP_RING << 16;
+               valid = false;
+       }
+
+       WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
+}
+
 static inline struct intel_engine_cs *
 __execlists_schedule_in(struct i915_request *rq)
 {
 
        intel_context_get(ce);
 
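+       /* Catch a stale or damaged context image before the HW does */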
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               execlists_check_context(ce, rq->engine);
+
        if (ce->tag) {
                /* Use a fixed tag for OA and friends */
                ce->lrc_desc |= (u64)ce->tag << 32;
        GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
        GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
-       regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+       regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
        regs[CTX_RING_HEAD] = ring->head;
        regs[CTX_RING_TAIL] = ring->tail;
 
                               &execlists->csb_status[reset_value]);
 }
 
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
-       if (INTEL_GEN(engine->i915) >= 12)
-               return 0x60;
-       else if (INTEL_GEN(engine->i915) >= 9)
-               return 0x54;
-       else if (engine->class == RENDER_CLASS)
-               return 0x58;
-       else
-               return -1;
-}
-
 static void __execlists_reset_reg_state(const struct intel_context *ce,
                                        const struct intel_engine_cs *engine)
 {
                        _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
                                            CTX_CTRL_RS_CTX_ENABLE);
 
-       regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+       regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
        regs[CTX_BB_STATE] = RING_BB_PPGTT;
 }