if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);
 
-       vgpu->shadow_ctx->engine[RCS].initialised = true;
-
        bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
        return 0;
 
                        struct intel_context *ce = &ctx->engine[engine->id];
 
                        seq_printf(m, "%s: ", engine->name);
-                       seq_putc(m, ce->initialised ? 'I' : 'i');
                        if (ce->state)
                                describe_obj(m, ce->state->obj);
                        if (ce->ring)
 
                 */
                value = 1;
                break;
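+       /* Bitmask of uabi engine classes whose context state is isolated */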
+       case I915_PARAM_HAS_CONTEXT_ISOLATION:
+               value = intel_engines_has_context_isolation(dev_priv);
+               break;
        case I915_PARAM_SLICE_MASK:
                value = INTEL_INFO(dev_priv)->sseu.slice_mask;
                if (!value)
 
        return true;
 }
 
+static int __intel_engines_record_defaults(struct drm_i915_private *i915)
+{
+       struct i915_gem_context *ctx;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err;
+
+       /*
+        * As we reset the GPU during very early sanitisation, the current
+        * register state on the GPU should reflect its default values.
+        * We load a context onto the hw (with restore-inhibit), then switch
+        * over to a second context to save that default register state. We
+        * can then prime every new context with that state so they all start
+        * from the same default HW values.
+        */
+
+       ctx = i915_gem_context_create_kernel(i915, 0);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+
+       for_each_engine(engine, i915, id) {
+               struct drm_i915_gem_request *rq;
+
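+               /*
+                * Submit a request on each engine using the scratch kernel
+                * context so that the engine loads the context and its
+                * default register state onto the hardware.
+                */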
+               rq = i915_gem_request_alloc(engine, ctx);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto out_ctx;
+               }
+
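+               /*
+                * On the render engine, init_context() also emits the
+                * golden render state and workaround batches, which become
+                * part of the recorded default image.
+                */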
+               err = i915_switch_context(rq);
+               if (!err && engine->init_context)
+                       err = engine->init_context(rq);
+
+               __i915_add_request(rq, true);
+               if (err)
+                       goto err_active;
+       }
+
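+       /*
+        * Switching every engine back to the kernel context and waiting
+        * for idle forces the hardware to save the register state of the
+        * scratch context, giving us the default image recorded below.
+        */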
+       err = i915_gem_switch_to_kernel_context(i915);
+       if (err)
+               goto err_active;
+
+       err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+       if (err)
+               goto err_active;
+
+       assert_kernel_context_is_current(i915);
+
+       for_each_engine(engine, i915, id) {
+               struct i915_vma *state;
+
+               state = ctx->engine[id].state;
+               if (!state)
+                       continue;
+
+               /*
+                * As we will hold a reference to the logical state, it will
+                * not be torn down with the context, and importantly the
+                * object will hold onto its vma (making it possible for a
+                * stray GTT write to corrupt our defaults). Unmap the vma
+                * from the GTT to prevent such accidents and reclaim the
+                * space.
+                */
+               err = i915_vma_unbind(state);
+               if (err)
+                       goto err_active;
+
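+               /*
+                * Pull the saved image into the CPU read domain so that
+                * later CPU copies of the default state (when priming new
+                * contexts) see coherent data.
+                */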
+               err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+               if (err)
+                       goto err_active;
+
+               engine->default_state = i915_gem_object_get(state->obj);
+       }
+
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+               unsigned int found = intel_engines_has_context_isolation(i915);
+
+               /*
+                * Make sure that classes with multiple engine instances all
+                * share the same basic configuration.
+                */
+               for_each_engine(engine, i915, id) {
+                       unsigned int bit = BIT(engine->uabi_class);
+                       unsigned int expected = engine->default_state ? bit : 0;
+
+                       if ((found & bit) != expected) {
+                               DRM_ERROR("mismatching default context state for class %d on engine %s\n",
+                                         engine->uabi_class, engine->name);
+                       }
+               }
+       }
+
+out_ctx:
+       i915_gem_context_set_closed(ctx);
+       i915_gem_context_put(ctx);
+       return err;
+
+err_active:
+       /*
+        * If we have to abandon now, we expect the engines to be idle
+        * and ready to be torn down. First try to flush any remaining
+        * requests, ensure we are pointing at the kernel context and
+        * then remove it.
+        */
+       if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
+               goto out_ctx;
+
+       if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+               goto out_ctx;
+
+       i915_gem_contexts_lost(i915);
+       goto out_ctx;
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
         */
        intel_init_clock_gating(dev_priv);
 
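+       /*
+        * Record the default hardware state for every engine; freshly
+        * created contexts are then primed from these images so that they
+        * all start from the same defaults.
+        */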
+       ret = __intel_engines_record_defaults(dev_priv);
 out_unlock:
        if (ret == -EIO) {
                /* Allow engine initialisation to fail by marking the GPU as
 
        return ctx;
 }
 
-static struct i915_gem_context *
-create_kernel_context(struct drm_i915_private *i915, int prio)
+struct i915_gem_context *
+i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 {
        struct i915_gem_context *ctx;
 
        ida_init(&dev_priv->contexts.hw_ida);
 
        /* lowest priority; idle task */
-       ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
+       ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context\n");
                err = PTR_ERR(ctx);
        dev_priv->kernel_context = ctx;
 
        /* highest priority; preempting task */
-       ctx = create_kernel_context(dev_priv, INT_MAX);
+       ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default preempt context\n");
                err = PTR_ERR(ctx);
                engine->context_unpin(engine, engine->last_retired_context);
                engine->last_retired_context = NULL;
        }
-
-       /* Force the GPU state to be restored on enabling */
-       if (!i915_modparams.enable_execlists) {
-               struct i915_gem_context *ctx;
-
-               list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-                       if (!i915_gem_context_is_default(ctx))
-                               continue;
-
-                       for_each_engine(engine, dev_priv, id)
-                               ctx->engine[engine->id].initialised = false;
-
-                       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
-               }
-
-               for_each_engine(engine, dev_priv, id) {
-                       struct intel_context *kce =
-                               &dev_priv->kernel_context->engine[engine->id];
-
-                       kce->initialised = true;
-               }
-       }
 }
 
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
        if (to->remap_slice)
                return false;
 
-       if (!to->engine[RCS].initialised)
-               return false;
-
        if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return false;
 
                        return ret;
        }
 
-       if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
-               /* NB: If we inhibit the restore, the context is not allowed to
-                * die because future work may end up depending on valid address
-                * space. This means we must enforce that a page table load
-                * occur when this occurs. */
+       if (i915_gem_context_is_kernel(to))
+               /*
+                * The kernel contexts are treated as pure scratch and are not
+                * expected to retain any state (as we sacrifice them during
+                * suspend and on resume they may be corrupted). This is ok,
+                * as nothing actually executes using the kernel context; it
+                * is purely used for flushing user contexts.
+                */
                hw_flags = MI_RESTORE_INHIBIT;
        else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
                hw_flags = MI_FORCE_RESTORE;
                to->remap_slice &= ~(1<<i);
        }
 
-       if (!to->engine[RCS].initialised) {
-               if (engine->init_context) {
-                       ret = engine->init_context(req);
-                       if (ret)
-                               return ret;
-               }
-               to->engine[RCS].initialised = true;
-       }
-
        return 0;
 }
 
 
                u32 *lrc_reg_state;
                u64 lrc_desc;
                int pin_count;
-               bool initialised;
        } engine[I915_NUM_ENGINES];
 
        /** ring_size: size for allocating the per-engine ring buffer */
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file);
 
+struct i915_gem_context *
+i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
+
 static inline struct i915_gem_context *
 i915_gem_context_get(struct i915_gem_context *ctx)
 {
 
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
+       if (engine->default_state)
+               i915_gem_object_put(engine->default_state);
+
        if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
                engine->context_unpin(engine, engine->i915->preempt_context);
        engine->context_unpin(engine, engine->i915->kernel_context);
        }
 }
 
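+/*
+ * Report which uabi engine classes have a recorded default context image,
+ * i.e. which classes can provide isolated per-context state, as a bitmask.
+ */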
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       unsigned int which;
+
+       which = 0;
+       for_each_engine(engine, i915, id)
+               if (engine->default_state)
+                       which |= BIT(engine->uabi_class);
+
+       return which;
+}
+
 static void print_request(struct drm_printer *m,
                          struct drm_i915_gem_request *rq,
                          const char *prefix)
 
        struct intel_engine_cs *engine = request->engine;
        struct intel_context *ce = &request->ctx->engine[engine->id];
        u32 *cs;
-       int ret;
 
        GEM_BUG_ON(!ce->pin_count);
 
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       if (!ce->initialised) {
-               ret = engine->init_context(request);
-               if (ret)
-                       return ret;
-
-               ce->initialised = true;
-       }
-
        /* Note that after this point, we have committed to using
         * this request as it is being used to both track the
         * state of engine initialisation and liveness of the
 
        CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
                _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-                                  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                   (HAS_RESOURCE_STREAMER(dev_priv) ?
                                   CTX_CTRL_RS_CTX_ENABLE : 0)));
        CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
                    struct intel_ring *ring)
 {
        void *vaddr;
+       u32 *regs;
        int ret;
 
        ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
        }
        ctx_obj->mm.dirty = true;
 
+       if (engine->default_state) {
+               /*
+                * We only want to copy over the template context state;
+                * skipping over the headers reserved for GuC communication,
+                * leaving those as zero.
+                */
+               const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
+               void *defaults;
+
+               defaults = i915_gem_object_pin_map(engine->default_state,
+                                                  I915_MAP_WB);
+               if (IS_ERR(defaults))
+                       return PTR_ERR(defaults);
+
+               memcpy(vaddr + start, defaults + start, engine->context_size);
+               i915_gem_object_unpin_map(engine->default_state);
+       }
+
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
-
-       execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
-                                ctx, engine, ring);
+       regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
+       execlists_init_reg_state(regs, ctx, engine, ring);
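+       /*
+        * Without a recorded default image to prime from (i.e. while we
+        * are still recording it), keep restore-inhibit set so that the
+        * first context load starts from the hardware defaults.
+        */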
+       if (!engine->default_state)
+               regs[CTX_CONTEXT_CONTROL + 1] |=
+                       _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
        i915_gem_object_unpin_map(ctx_obj);
 
 
        ce->ring = ring;
        ce->state = vma;
-       ce->initialised |= engine->init_context == NULL;
 
        return 0;
 
 
        struct drm_i915_private *i915 = engine->i915;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
+       int err;
 
        obj = i915_gem_object_create(i915, engine->context_size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
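+       /*
+        * Prime the new context image with the recorded default state so
+        * that it starts from the same register values as every other
+        * context.
+        */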
+       if (engine->default_state) {
+               void *defaults, *vaddr;
+
+               vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       goto err_obj;
+               }
+
+               defaults = i915_gem_object_pin_map(engine->default_state,
+                                                  I915_MAP_WB);
+               if (IS_ERR(defaults)) {
+                       err = PTR_ERR(defaults);
+                       goto err_map;
+               }
+
+               memcpy(vaddr, defaults, engine->context_size);
+
+               i915_gem_object_unpin_map(engine->default_state);
+               i915_gem_object_unpin_map(obj);
+       }
+
        /*
         * Try to make the context utilize L3 as well as LLC.
         *
        }
 
        vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
-       if (IS_ERR(vma))
-               i915_gem_object_put(obj);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err_obj;
+       }
 
        return vma;
+
+err_map:
+       i915_gem_object_unpin_map(obj);
+err_obj:
+       i915_gem_object_put(obj);
+       return ERR_PTR(err);
 }
 
 static struct intel_ring *
                ce->state->obj->pin_global++;
        }
 
-       /* The kernel context is only used as a placeholder for flushing the
-        * active context. It is never used for submitting user rendering and
-        * as such never requires the golden render context, and so we can skip
-        * emitting it when we switch to the kernel context. This is required
-        * as during eviction we cannot allocate and pin the renderstate in
-        * order to initialise the context.
-        */
-       if (i915_gem_context_is_kernel(ctx))
-               ce->initialised = true;
-
        i915_gem_context_get(ctx);
 
 out:
 
        struct intel_ring *buffer;
        struct intel_timeline *timeline;
 
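+       /* Recorded default context image used to prime new contexts */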
+       struct drm_i915_gem_object *default_state;
        struct intel_render_state *render_state;
 
        atomic_t irq_count;
 void intel_engines_unpark(struct drm_i915_private *i915);
 
 void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
 
 
  */
 #define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
 
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports context isolation per engine class as a bitmask, with the
+ * bit for an engine class set if that class supports isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
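+/*
+ * An illustrative (not normative) query from userspace, using the getparam
+ * ioctl already defined in this header:
+ *
+ *	int isolation = 0;
+ *	struct drm_i915_getparam gp = {
+ *		.param = I915_PARAM_HAS_CONTEXT_ISOLATION,
+ *		.value = &isolation,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ *
+ * Each bit set in the returned value corresponds to an engine class whose
+ * context state is isolated.
+ */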
+
 typedef struct drm_i915_getparam {
        __s32 param;
        /*