seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
-static void describe_ctx(struct seq_file *m, struct i915_gem_context *ctx)
-{
-       seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
-       seq_putc(m, ctx->remap_slice ? 'R' : 'r');
-       seq_putc(m, ' ');
-}
-
 static int i915_context_status(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
-       enum intel_engine_id id;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
        list_for_each_entry(ctx, &dev_priv->context_list, link) {
-               if (!i915.enable_execlists &&
-                   ctx->legacy_hw_ctx.rcs_state == NULL)
-                       continue;
-
                seq_printf(m, "HW context %u ", ctx->hw_id);
                if (IS_ERR(ctx->file_priv))
                        seq_puts(m, "(deleted) ");
                else if (!ctx->file_priv)
                        seq_puts(m, "(kernel) ");
 
-               describe_ctx(m, ctx);
+               seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+               seq_putc(m, '\n');
 
-               if (i915.enable_execlists) {
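+               /* One line per engine: 'I'/'i' shows whether that engine's
+                * context image has been initialised.
+                */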
+               for_each_engine(engine, dev_priv) {
+                       struct intel_context *ce = &ctx->engine[engine->id];
+
+                       seq_printf(m, "%s: ", engine->name);
+                       seq_putc(m, ce->initialised ? 'I' : 'i');
+                       if (ce->state)
+                               describe_obj(m, ce->state);
+                       if (ce->ringbuf)
+                               describe_ctx_ringbuf(m, ce->ringbuf);
                        seq_putc(m, '\n');
-                       for_each_engine_id(engine, dev_priv, id) {
-                               struct drm_i915_gem_object *ctx_obj =
-                                       ctx->engine[id].state;
-                               struct intel_ringbuffer *ringbuf =
-                                       ctx->engine[id].ringbuf;
-
-                               seq_printf(m, "%s: ", engine->name);
-                               if (ctx_obj)
-                                       describe_obj(m, ctx_obj);
-                               if (ringbuf)
-                                       describe_ctx_ringbuf(m, ringbuf);
-                               seq_putc(m, '\n');
-                       }
-               } else {
-                       describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
                }
 
                seq_putc(m, '\n');
                              struct i915_gem_context *ctx,
                              struct intel_engine_cs *engine)
 {
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        struct page *page;
        uint32_t *reg_state;
        int j;
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        unsigned long ggtt_offset = 0;
 
        seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
        struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+       int i;
 
        lockdep_assert_held(&ctx->i915->dev->struct_mutex);
        trace_i915_context_free(ctx);
 
-       if (i915.enable_execlists)
-               intel_lr_context_free(ctx);
-
        /*
         * This context is going away and we need to remove all VMAs still
         * around. This is to handle imported shared objects for which
         * destructor did not run when their handles were closed.
         */
        i915_gem_context_clean(ctx);
 
        i915_ppgtt_put(ctx->ppgtt);
 
-       if (ctx->legacy_hw_ctx.rcs_state)
-               drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
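+       /* The context image and ringbuffer are now stored per-engine in
+        * ctx->engine[] for both legacy and execlists modes, so release
+        * them in one common loop.
+        */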
+       for (i = 0; i < I915_NUM_ENGINES; i++) {
+               struct intel_context *ce = &ctx->engine[i];
+
+               if (!ce->state)
+                       continue;
+
+               WARN_ON(ce->pin_count);
+               if (ce->ringbuf)
+                       intel_ringbuffer_free(ce->ringbuf);
+
+               drm_gem_object_unreference(&ce->state->base);
+       }
+
        list_del(&ctx->link);
 
        ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
                        ret = PTR_ERR(obj);
                        goto err_out;
                }
-               ctx->legacy_hw_ctx.rcs_state = obj;
+               ctx->engine[RCS].state = obj;
        }
 
        /* Default context will never have a file_priv */
        if (i915.enable_execlists) {
                intel_lr_context_unpin(ctx, engine);
        } else {
-               if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
-                       i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+               struct intel_context *ce = &ctx->engine[engine->id];
+
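+               /* Drop the GGTT pin taken when this context was switched in */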
+               if (ce->state)
+                       i915_gem_object_ggtt_unpin(ce->state);
+
                i915_gem_context_unreference(ctx);
        }
 }
                return PTR_ERR(ctx);
        }
 
-       if (ctx->legacy_hw_ctx.rcs_state) {
+       if (!i915.enable_execlists && ctx->engine[RCS].state) {
                int ret;
 
                /* We may need to do things with the shrinker which
                 * require us to immediately switch back to the default
                 * context. This can cause a problem as pinning the
                 * default context also requires GTT space which may not
                 * be available. To avoid this we always pin the default
                 * context.
                 */
-               ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
+               ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
                                            get_context_alignment(dev_priv), 0);
                if (ret) {
                        DRM_ERROR("Failed to pin default global context (error %d)\n",
        lockdep_assert_held(&dev_priv->dev->struct_mutex);
 
        for_each_engine(engine, dev_priv) {
-               if (engine->last_context == NULL)
-                       continue;
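+               /* The GPU state is lost; drop the pin on the stale last_context */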
+               if (engine->last_context) {
+                       i915_gem_context_unpin(engine->last_context, engine);
+                       engine->last_context = NULL;
+               }
 
-               i915_gem_context_unpin(engine->last_context, engine);
-               engine->last_context = NULL;
+               /* Force the GPU state to be reinitialised on enabling */
+               dev_priv->kernel_context->engine[engine->id].initialised =
+                       engine->init_context == NULL;
        }
 
        /* Force the GPU state to be reinitialised on enabling */
-       dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
        dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
 }
 
 
        lockdep_assert_held(&dev->struct_mutex);
 
-       if (dctx->legacy_hw_ctx.rcs_state)
-               i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+       if (!i915.enable_execlists && dctx->engine[RCS].state)
+               i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
 
        i915_gem_context_unreference(dctx);
        dev_priv->kernel_context = NULL;
        intel_ring_emit(engine, MI_NOOP);
        intel_ring_emit(engine, MI_SET_CONTEXT);
        intel_ring_emit(engine,
-                       i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+                       i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
                        flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
        if (to->remap_slice)
                return false;
 
-       if (!to->legacy_hw_ctx.initialized)
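+       /* The switch cannot be skipped while the context image is still
+        * uninitialised.
+        */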
+       if (!to->engine[RCS].initialised)
                return false;
 
        if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
-       ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+       ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
                                    get_context_alignment(engine->i915),
                                    0);
        if (ret)
         *
         * XXX: We need a real interface to do this instead of trickery.
         */
-       ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+       ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
        if (ret)
                goto unpin_out;
 
                        goto unpin_out;
        }
 
-       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+       if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
-               from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+               from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
-               from->legacy_hw_ctx.rcs_state->dirty = 1;
+               from->engine[RCS].state->dirty = 1;
 
                /* obj is kept alive until the next request by its active ref */
-               i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+               i915_gem_object_ggtt_unpin(from->engine[RCS].state);
                i915_gem_context_unreference(from);
        }
        i915_gem_context_reference(to);
                to->remap_slice &= ~(1<<i);
        }
 
-       if (!to->legacy_hw_ctx.initialized) {
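+       /* On first use, let the engine emit its initial context state
+        * (workarounds, golden render state) before marking the image
+        * initialised.
+        */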
+       if (!to->engine[RCS].initialised) {
                if (engine->init_context) {
                        ret = engine->init_context(req);
                        if (ret)
                                return ret;
                }
-               to->legacy_hw_ctx.initialized = true;
+               to->engine[RCS].initialised = true;
        }
 
        return 0;
 
 unpin_out:
-       i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+       i915_gem_object_ggtt_unpin(to->engine[RCS].state);
        return ret;
 }
 
        WARN_ON(i915.enable_execlists);
        lockdep_assert_held(&req->i915->dev->struct_mutex);
 
-       if (engine->id != RCS ||
-           req->ctx->legacy_hw_ctx.rcs_state == NULL) {
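+       /* An engine without a context image has no MI_SET_CONTEXT to emit;
+        * it only needs its page tables loaded.
+        */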
+       if (!req->ctx->engine[engine->id].state) {
                struct i915_gem_context *to = req->ctx;
                struct i915_hw_ppgtt *ppgtt =
                        to->ppgtt ?: req->i915->mm.aliasing_ppgtt;