static void execlists_sanitize(struct intel_engine_cs *engine)
 {
+       /*
+        * Poison residual state on resume, in case the suspend didn't!
+        *
+        * We have to assume that across suspend/resume (or other loss
+        * of control) that the contents of our pinned buffers has been
+        * lost, replaced by garbage. Since this doesn't always happen,
+        * let's poison such state so that we more quickly spot when
+        * we falsely assume it has been preserved.
+        */
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+       /*
+        * NOTE(review): must follow the poisoning above — presumably this
+        * re-derives the CSB ring head/tail from the now-scrubbed status
+        * page; confirm against reset_csb_pointers().
+        */
        reset_csb_pointers(engine);
+
+       /*
+        * The kernel_context HWSP is stored in the status_page. As above,
+        * that may be lost on resume/initialisation, and so we need to
+        * reset the value in the HWSP.
+        */
+       intel_timeline_reset_seqno(engine->kernel_context->timeline);
 }
 
 static void enable_error_interrupt(struct intel_engine_cs *engine)
 
 static void execlists_release(struct intel_engine_cs *engine)
 {
+       engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
        execlists_shutdown(engine);
 
        intel_engine_cleanup_common(engine);
 {
        /* Default vfuncs which can be overriden by each engine. */
 
-       engine->sanitize = execlists_sanitize;
        engine->resume = execlists_resume;
 
        engine->cops = &execlists_context_ops;
                execlists->csb_size = GEN11_CSB_ENTRIES;
 
        /* Finally, take ownership and responsibility for cleanup! */
+       engine->sanitize = execlists_sanitize;
        engine->release = execlists_release;
 
        return 0;
 
        return 0;
 }
 
+/**
+ * intel_timeline_reset_seqno - rewrite the timeline's current seqno into its HWSP
+ * @tl: the timeline to repair; must be pinned (pin_count != 0) with no
+ *      requests in flight, as asserted below
+ *
+ * Copies tl->seqno back into the hardware status page slot (tl->hwsp_seqno)
+ * with WRITE_ONCE. Callers use this when the HWSP contents may have been
+ * lost or scrambled (e.g. across suspend/resume), so that the stored value
+ * agrees with the software-tracked seqno again.
+ */
+void intel_timeline_reset_seqno(const struct intel_timeline *tl)
+{
+       /* Must be pinned to be writable, and no requests in flight. */
+       GEM_BUG_ON(!atomic_read(&tl->pin_count));
+       WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+}
+
 void intel_timeline_enter(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
                return;
 
        spin_lock(&timelines->lock);
-       if (!atomic_fetch_inc(&tl->active_count))
+       if (!atomic_fetch_inc(&tl->active_count)) {
+               /*
+                * The HWSP is volatile, and may have been lost while inactive,
+                * e.g. across suspend/resume. Be paranoid, and ensure that
+                * the HWSP value matches our seqno so we don't proclaim
+                * the next request as already complete.
+                */
+               intel_timeline_reset_seqno(tl);
                list_add_tail(&tl->link, &timelines->active_list);
+       }
        spin_unlock(&timelines->lock);
 }