void *vaddr;
        int ret;
 
+       INIT_LIST_HEAD(&engine->status_page.timelines);
+
        /*
         * Though the HWS register does support 36bit addresses, historically
         * we have had hangs and corruption reported due to wild writes if
        return ce;
 }
 
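+/*
+ * Release a pinned context: remove its timeline from the engine's HWSP
+ * tracking list before unpinning and dropping the final reference.
+ */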
+static void destroy_pinned_context(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = ce->engine;
+       struct i915_vma *hwsp = engine->status_page.vma;
+
+       GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+       mutex_lock(&hwsp->vm->mutex);
+       list_del(&ce->timeline->engine_link);
+       mutex_unlock(&hwsp->vm->mutex);
+
+       intel_context_unpin(ce);
+       intel_context_put(ce);
+}
+
 static struct intel_context *
 create_kernel_context(struct intel_engine_cs *engine)
 {
        GEM_BUG_ON(!list_empty(&engine->active.requests));
        tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
 
-       cleanup_status_page(engine);
        intel_breadcrumbs_free(engine->breadcrumbs);
 
        intel_engine_fini_retire(engine);
        if (engine->default_state)
                fput(engine->default_state);
 
-       if (engine->kernel_context) {
-               intel_context_unpin(engine->kernel_context);
-               intel_context_put(engine->kernel_context);
-       }
+       if (engine->kernel_context)
+               destroy_pinned_context(engine->kernel_context);
+
        GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+       cleanup_status_page(engine);
 
        intel_wa_list_free(&engine->ctx_wa_list);
        intel_wa_list_free(&engine->wa_list);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       for_each_engine(engine, gt, id)
+       for_each_engine(engine, gt, id) {
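+               /* Scrub any state (e.g. the HWSP) lost while we were not in control */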
+               if (engine->sanitize)
+                       engine->sanitize(engine);
+
                engine->set_default_submission(engine);
+       }
 }
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 
 
                /* Scrub the context image after our loss of control */
                ce->ops->reset(ce);
+
+               CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+                        ce->timeline->seqno,
+                        READ_ONCE(*ce->timeline->hwsp_seqno),
+                        ce->ring->emit);
+               GEM_BUG_ON(ce->timeline->seqno !=
+                          READ_ONCE(*ce->timeline->hwsp_seqno));
        }
 
        if (engine->unpark)
 
 #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
 
 struct intel_hw_status_page {
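+       /* Timelines backed by this status page, linked via tl->engine_link */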
+       struct list_head timelines;
        struct i915_vma *vma;
        u32 *addr;
 };
 
        GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
 }
 
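+/*
+ * Across suspend/resume the HWSP contents may be lost, so walk each timeline
+ * carved out of this engine's status page and reset its seqno in the HWSP.
+ */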
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+       struct intel_timeline *tl;
+
+       list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+               intel_timeline_reset_seqno(tl);
+}
+
 static void execlists_sanitize(struct intel_engine_cs *engine)
 {
        GEM_BUG_ON(execlists_active(&engine->execlists));
         * that may be lost on resume/initialisation, and so we need to
         * reset the value in the HWSP.
         */
-       intel_timeline_reset_seqno(engine->kernel_context->timeline);
+       sanitize_hwsp(engine);
 
        /* And scrub the dirty cachelines for the HWSP */
        clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
 
 
 void lrc_reset(struct intel_context *ce)
 {
-       CE_TRACE(ce, "reset\n");
        GEM_BUG_ON(!intel_context_is_pinned(ce));
 
        intel_ring_reset(ce->ring, ce->ring->emit);
 
        return ret;
 }
 
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+       struct intel_timeline *tl;
+
+       list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+               intel_timeline_reset_seqno(tl);
+}
+
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+       /*
+        * Poison residual state on resume, in case the suspend didn't!
+        *
+        * We have to assume that, across suspend/resume (or other loss
+        * of control), the contents of our pinned buffers have been
+        * lost, replaced by garbage. Since this doesn't always happen,
+        * let's poison such state so that we more quickly spot when
+        * we falsely assume it has been preserved.
+        */
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+       /*
+        * The kernel_context, and every other timeline carved out of the
+        * status_page, stores its seqno in the HWSP. As above, those
+        * values may be lost on resume/initialisation, and so we need to
+        * reset them in the HWSP.
+        */
+       sanitize_hwsp(engine);
+
+       /* And scrub the dirty cachelines for the HWSP */
+       clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
 static void reset_prepare(struct intel_engine_cs *engine)
 {
        struct intel_uncore *uncore = engine->uncore;
        setup_irq(engine);
 
        engine->resume = xcs_resume;
+       engine->sanitize = xcs_sanitize;
+
        engine->reset.prepare = reset_prepare;
        engine->reset.rewind = reset_rewind;
        engine->reset.cancel = reset_cancel;
 
        return timeline;
 }
 
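+/*
+ * Create a timeline at @offset within the engine's HWSP and track it on the
+ * engine's status_page so that its seqno is reset when the engine is
+ * sanitized on resume.
+ */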
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+                                 unsigned int offset)
+{
+       struct i915_vma *hwsp = engine->status_page.vma;
+       struct intel_timeline *tl;
+
+       tl = __intel_timeline_create(engine->gt, hwsp, offset);
+       if (IS_ERR(tl))
+               return tl;
+
+       /* Borrow a nearby lock; we only create these timelines during init */
+       mutex_lock(&hwsp->vm->mutex);
+       list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+       mutex_unlock(&hwsp->vm->mutex);
+
+       return tl;
+}
+
 void __intel_timeline_pin(struct intel_timeline *tl)
 {
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
 
        return __intel_timeline_create(gt, NULL, 0);
 }
 
-static inline struct intel_timeline *
+struct intel_timeline *
 intel_timeline_create_from_engine(struct intel_engine_cs *engine,
-                                 unsigned int offset)
-{
-       return __intel_timeline_create(engine->gt,
-                                      engine->status_page.vma,
-                                      offset);
-}
+                                 unsigned int offset);
 
 static inline struct intel_timeline *
 intel_timeline_get(struct intel_timeline *timeline)
 
        struct list_head link;
        struct intel_gt *gt;
 
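+       /* Link in engine->status_page.timelines when backed by the engine's HWSP */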
+       struct list_head engine_link;
+
        struct kref kref;
        struct rcu_head rcu;
 };