struct kmem_cache *slab_ce;
 } global;
 
-struct intel_context *intel_context_alloc(void)
+static struct intel_context *intel_context_alloc(void)
 {
        return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
 }
 }
 
 struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
+intel_context_create(struct i915_gem_context *ctx,
                     struct intel_engine_cs *engine)
 {
-       struct intel_context *ce = NULL;
-       struct rb_node *p;
-
-       spin_lock(&ctx->hw_contexts_lock);
-       p = ctx->hw_contexts.rb_node;
-       while (p) {
-               struct intel_context *this =
-                       rb_entry(p, struct intel_context, node);
-
-               if (this->engine == engine) {
-                       GEM_BUG_ON(this->gem_context != ctx);
-                       ce = this;
-                       break;
-               }
-
-               if (this->engine < engine)
-                       p = p->rb_right;
-               else
-                       p = p->rb_left;
-       }
-       spin_unlock(&ctx->hw_contexts_lock);
-
-       return ce;
-}
-
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
-                      struct intel_engine_cs *engine,
-                      struct intel_context *ce)
-{
-       struct rb_node **p, *parent;
-       int err = 0;
-
-       spin_lock(&ctx->hw_contexts_lock);
-
-       parent = NULL;
-       p = &ctx->hw_contexts.rb_node;
-       while (*p) {
-               struct intel_context *this;
-
-               parent = *p;
-               this = rb_entry(parent, struct intel_context, node);
-
-               if (this->engine == engine) {
-                       err = -EEXIST;
-                       ce = this;
-                       break;
-               }
-
-               if (this->engine < engine)
-                       p = &parent->rb_right;
-               else
-                       p = &parent->rb_left;
-       }
-       if (!err) {
-               rb_link_node(&ce->node, parent, p);
-               rb_insert_color(&ce->node, &ctx->hw_contexts);
-       }
-
-       spin_unlock(&ctx->hw_contexts_lock);
-
-       return ce;
-}
-
-void __intel_context_remove(struct intel_context *ce)
-{
-       struct i915_gem_context *ctx = ce->gem_context;
-
-       spin_lock(&ctx->hw_contexts_lock);
-       rb_erase(&ce->node, &ctx->hw_contexts);
-       spin_unlock(&ctx->hw_contexts_lock);
-}
-
-struct intel_context *
-intel_context_instance(struct i915_gem_context *ctx,
-                      struct intel_engine_cs *engine)
-{
-       struct intel_context *ce, *pos;
-
-       ce = intel_context_lookup(ctx, engine);
-       if (likely(ce))
-               return intel_context_get(ce);
+       struct intel_context *ce;
 
        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);
 
        intel_context_init(ce, ctx, engine);
-
-       pos = __intel_context_insert(ctx, engine, ce);
-       if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
-               intel_context_free(ce);
-
-       GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
-       return intel_context_get(pos);
+       return ce;
 }
 
 int __intel_context_do_pin(struct intel_context *ce)
                   struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
 {
+       GEM_BUG_ON(!engine->cops);
+
        kref_init(&ce->ref);
 
        ce->gem_context = ctx;
 {
        intel_engine_pm_put(ce->engine);
 }
+
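+/*
+ * Helper wrapping the common pin + i915_request_create() + unpin sequence
+ * used when building a request on a context that is not already pinned.
+ */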
+struct i915_request *intel_context_create_request(struct intel_context *ce)
+{
+       struct i915_request *rq;
+       int err;
+
+       err = intel_context_pin(ce);
+       if (unlikely(err))
+               return ERR_PTR(err);
+
+       rq = i915_request_create(ce);
+       intel_context_unpin(ce);
+
+       return rq;
+}
 
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
 
-struct intel_context *intel_context_alloc(void);
-void intel_context_free(struct intel_context *ce);
-
 void intel_context_init(struct intel_context *ce,
                        struct i915_gem_context *ctx,
                        struct intel_engine_cs *engine);
 
-/**
- * intel_context_lookup - Find the matching HW context for this (ctx, engine)
- * @ctx - the parent GEM context
- * @engine - the target HW engine
- *
- * May return NULL if the HW context hasn't been instantiated (i.e. unused).
- */
 struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
+intel_context_create(struct i915_gem_context *ctx,
                     struct intel_engine_cs *engine);
 
+void intel_context_free(struct intel_context *ce);
+
 /**
  * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
  * @ce - the context
        mutex_unlock(&ce->pin_mutex);
 }
 
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
-                      struct intel_engine_cs *engine,
-                      struct intel_context *ce);
-void
-__intel_context_remove(struct intel_context *ce);
-
-struct intel_context *
-intel_context_instance(struct i915_gem_context *ctx,
-                      struct intel_engine_cs *engine);
-
 int __intel_context_do_pin(struct intel_context *ce);
 
 static inline int intel_context_pin(struct intel_context *ce)
        mutex_unlock(&ce->ring->timeline->mutex);
 }
 
+struct i915_request *intel_context_create_request(struct intel_context *ce);
+
 #endif /* __INTEL_CONTEXT_H__ */
 
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
-#include <linux/rbtree.h>
 #include <linux/types.h>
 
 #include "i915_active_types.h"
        struct i915_active_request active_tracker;
 
        const struct intel_context_ops *ops;
-       struct rb_node node;
 
        /** sseu: Control eu/slice partitioning */
        struct intel_sseu sseu;
 
        struct intel_context *ce;
        int err;
 
-       ce = intel_context_instance(ctx, engine);
+       ce = i915_gem_context_get_engine(ctx, engine->id);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 
  */
 
 #include "i915_drv.h"
+#include "i915_gem_context.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
 
        i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
 
        engine->kernel_context =
-               intel_context_instance(i915->kernel_context, engine);
+               i915_gem_context_get_engine(i915->kernel_context, engine->id);
        if (IS_ERR(engine->kernel_context))
                goto err_timeline;
 
 
                INIT_LIST_HEAD(&s->workload_q_head[i]);
                s->shadow[i] = ERR_PTR(-EINVAL);
 
-               ce = intel_context_instance(ctx, engine);
+               ce = i915_gem_context_get_engine(ctx, i);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        goto out_shadow_ctx;
 
 
 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx;
        struct intel_engine_cs *engine;
+       struct i915_gem_context *ctx;
+       struct i915_gem_engines *e;
        enum intel_engine_id id;
        int err = 0;
 
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
+       e = i915_gem_context_lock_engines(ctx);
+
        for_each_engine(engine, i915, id) {
+               struct intel_context *ce = e->engines[id];
                struct i915_request *rq;
 
-               rq = i915_request_alloc(engine, ctx);
+               rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
-                       goto out_ctx;
+                       goto err_active;
                }
 
                err = 0;
-               if (engine->init_context)
-                       err = engine->init_context(rq);
+               if (rq->engine->init_context)
+                       err = rq->engine->init_context(rq);
 
                i915_request_add(rq);
                if (err)
        }
 
        for_each_engine(engine, i915, id) {
-               struct intel_context *ce;
-               struct i915_vma *state;
+               struct intel_context *ce = e->engines[id];
+               struct i915_vma *state = ce->state;
                void *vaddr;
 
-               ce = intel_context_lookup(ctx, engine);
-               if (!ce)
-                       continue;
-
-               state = ce->state;
                if (!state)
                        continue;
 
        }
 
 out_ctx:
+       i915_gem_context_unlock_engines(ctx);
        i915_gem_context_set_closed(ctx);
        i915_gem_context_put(ctx);
        return err;
 
        if (!engine)
                return ERR_PTR(-EINVAL);
 
-       return intel_context_instance(ctx, engine);
+       return i915_gem_context_get_engine(ctx, engine->id);
 }
 
 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
        mutex_unlock(&i915->contexts.mutex);
 }
 
-static void i915_gem_context_free(struct i915_gem_context *ctx)
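+/*
+ * Drop the references held on the first @count populated engine
+ * contexts and free the array itself.
+ */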
+static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
-       struct intel_context *it, *n;
+       while (count--) {
+               if (!e->engines[count])
+                       continue;
+
+               intel_context_put(e->engines[count]);
+       }
+       kfree(e);
+}
+
+static void free_engines(struct i915_gem_engines *e)
+{
+       __free_engines(e, e->num_engines);
+}
+
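+/*
+ * Construct the default engine map for a new context: one logical
+ * intel_context per physical engine, indexed by the engine's id so the
+ * legacy execbuf ring selection maps straight onto this array.
+ */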
+static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
+{
+       struct intel_engine_cs *engine;
+       struct i915_gem_engines *e;
+       enum intel_engine_id id;
+
+       e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+
+       e->i915 = ctx->i915;
+       for_each_engine(engine, ctx->i915, id) {
+               struct intel_context *ce;
+
+               ce = intel_context_create(ctx, engine);
+               if (IS_ERR(ce)) {
+                       __free_engines(e, id);
+                       return ERR_CAST(ce);
+               }
 
+               e->engines[id] = ce;
+       }
+       e->num_engines = id;
+
+       return e;
+}
+
+static void i915_gem_context_free(struct i915_gem_context *ctx)
+{
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
        GEM_BUG_ON(!list_empty(&ctx->active_engines));
        release_hw_id(ctx);
        i915_ppgtt_put(ctx->ppgtt);
 
-       rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
-               intel_context_put(it);
+       free_engines(rcu_access_pointer(ctx->engines));
+       mutex_destroy(&ctx->engines_mutex);
 
        if (ctx->timeline)
                i915_timeline_put(ctx->timeline);
 __create_context(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
+       struct i915_gem_engines *e;
+       int err;
        int i;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->active_engines);
        mutex_init(&ctx->mutex);
 
-       ctx->hw_contexts = RB_ROOT;
-       spin_lock_init(&ctx->hw_contexts_lock);
+       mutex_init(&ctx->engines_mutex);
+       e = default_engines(ctx);
+       if (IS_ERR(e)) {
+               err = PTR_ERR(e);
+               goto err_free;
+       }
+       RCU_INIT_POINTER(ctx->engines, e);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
        return ctx;
+
+err_free:
+       kfree(ctx);
+       return ERR_PTR(err);
 }
 
 static struct i915_hw_ppgtt *
 {
        struct drm_i915_private *i915 = ctx->i915;
        struct context_barrier_task *cb;
-       struct intel_context *ce, *next;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
        int err = 0;
 
        lockdep_assert_held(&i915->drm.struct_mutex);
        i915_active_init(i915, &cb->base, cb_retire);
        i915_active_acquire(&cb->base);
 
-       rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
-               struct intel_engine_cs *engine = ce->engine;
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct i915_request *rq;
 
-               if (!(engine->mask & engines))
+               if (!(ce->engine->mask & engines))
                        continue;
 
                if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
-                                      engine->mask)) {
+                                      ce->engine->mask)) {
                        err = -ENXIO;
                        break;
                }
 
-               rq = i915_request_alloc(engine, ctx);
+               rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                if (err)
                        break;
        }
+       i915_gem_context_unlock_engines(ctx);
 
        cb->task = err ? NULL : task; /* caller needs to unwind instead */
        cb->data = data;
        return err;
 }
 
+/* GEM context-engines iterator: for_each_gem_engine() */
+struct intel_context *
+i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
+{
+       const struct i915_gem_engines *e = it->engines;
+       struct intel_context *ctx;
+
+       do {
+               if (it->idx >= e->num_engines)
+                       return NULL;
+
+               ctx = e->engines[it->idx++];
+       } while (!ctx);
+
+       return ctx;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_context.c"
 #include "selftests/i915_gem_context.c"
 
        kref_put(&ctx->ref, i915_gem_context_release);
 }
 
+static inline struct i915_gem_engines *
+i915_gem_context_engines(struct i915_gem_context *ctx)
+{
+       return rcu_dereference_protected(ctx->engines,
+                                        lockdep_is_held(&ctx->engines_mutex));
+}
+
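+/*
+ * Writers to ctx->engines must hold engines_mutex via
+ * i915_gem_context_lock_engines(); readers may either take the same lock
+ * or rely on RCU, as i915_gem_context_get_engine() does.
+ */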
+static inline struct i915_gem_engines *
+i915_gem_context_lock_engines(struct i915_gem_context *ctx)
+       __acquires(&ctx->engines_mutex)
+{
+       mutex_lock(&ctx->engines_mutex);
+       return i915_gem_context_engines(ctx);
+}
+
+static inline void
+i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
+       __releases(&ctx->engines_mutex)
+{
+       mutex_unlock(&ctx->engines_mutex);
+}
+
+static inline struct intel_context *
+i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
+{
+       return i915_gem_context_engines(ctx)->engines[idx];
+}
+
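+/*
+ * RCU-protected lookup that also takes a reference on the selected
+ * context; returns ERR_PTR(-EINVAL) for an unknown or unpopulated index.
+ */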
+static inline struct intel_context *
+i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
+{
+       struct intel_context *ce = ERR_PTR(-EINVAL);
+
+       rcu_read_lock(); {
+               struct i915_gem_engines *e = rcu_dereference(ctx->engines);
+               if (likely(idx < e->num_engines && e->engines[idx]))
+                       ce = intel_context_get(e->engines[idx]);
+       } rcu_read_unlock();
+
+       return ce;
+}
+
+static inline void
+i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
+                          struct i915_gem_engines *engines)
+{
+       GEM_BUG_ON(!engines);
+       it->engines = engines;
+       it->idx = 0;
+}
+
+struct intel_context *
+i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
+
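+/*
+ * Iterate over every populated engine in @engines; NULL slots are
+ * skipped by i915_gem_engines_iter_next().
+ */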
+#define for_each_gem_engine(ce, engines, it) \
+       for (i915_gem_engines_iter_init(&(it), (engines)); \
+            ((ce) = i915_gem_engines_iter_next(&(it)));)
+
 struct i915_lut_handle *i915_lut_handle_alloc(void);
 void i915_lut_handle_free(struct i915_lut_handle *lut);
 
 
 struct i915_timeline;
 struct intel_ring;
 
+struct i915_gem_engines {
+       struct rcu_work rcu;
+       struct drm_i915_private *i915;
+       unsigned int num_engines;
+       struct intel_context *engines[];
+};
+
+struct i915_gem_engines_iter {
+       unsigned int idx;
+       const struct i915_gem_engines *engines;
+};
+
 /**
  * struct i915_gem_context - client state
  *
        /** file_priv: owning file descriptor */
        struct drm_i915_file_private *file_priv;
 
+       /**
+        * @engines: User defined engines for this context
+        *
+        * Various uAPI offer the ability to look up an index from this
+        * array to select an engine to operate on.
+        *
+        * Multiple logically distinct instances of the same engine
+        * may be defined in the array, as well as composite virtual
+        * engines.
+        *
+        * Execbuf uses the I915_EXEC_RING_MASK as an index into this
+        * array to select which HW context + engine to execute on. For
+        * the default array, the user_ring_map[] is used to translate
+        * the legacy uABI onto the appropriate index (e.g. both
+        * I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
+        * context, and I915_EXEC_BSD is weird). For a user defined
+        * array, execbuf uses I915_EXEC_RING_MASK as a plain index.
+        *
+        * User defined by I915_CONTEXT_PARAM_ENGINE (when the
+        * CONTEXT_USER_ENGINES flag is set).
+        */
+       struct i915_gem_engines __rcu *engines;
+       struct mutex engines_mutex; /* guards writes to engines */
+
        struct i915_timeline *timeline;
 
        /**
 
        struct i915_sched_attr sched;
 
-       /** hw_contexts: per-engine logical HW state */
-       struct rb_root hw_contexts;
-       spinlock_t hw_contexts_lock;
-
        /** ring_size: size for allocating the per-engine ring buffer */
        u32 ring_size;
        /** desc_template: invariant fields for the HW context descriptor */
 
        return file_priv->bsd_engine;
 }
 
-#define I915_USER_RINGS (4)
-
-static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[] = {
        [I915_EXEC_DEFAULT]     = RCS0,
        [I915_EXEC_RENDER]      = RCS0,
        [I915_EXEC_BLT]         = BCS0,
        [I915_EXEC_VEBOX]       = VECS0
 };
 
-static int eb_pin_context(struct i915_execbuffer *eb,
-                         struct intel_engine_cs *engine)
+static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 {
-       struct intel_context *ce;
        int err;
 
        /*
        if (err)
                return err;
 
-       ce = intel_context_instance(eb->gem_context, engine);
-       if (IS_ERR(ce))
-               return PTR_ERR(ce);
-
        /*
         * Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
        err = intel_context_pin(ce);
-       intel_context_put(ce);
        if (err)
                return err;
 
-       eb->engine = engine;
+       eb->engine = ce->engine;
        eb->context = ce;
        return 0;
 }
        intel_context_unpin(eb->context);
 }
 
-static int
-eb_select_engine(struct i915_execbuffer *eb,
-                struct drm_file *file,
-                struct drm_i915_gem_execbuffer2 *args)
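+/*
+ * Translate the legacy I915_EXEC_* ring selector into an index into the
+ * context's engine array; -1 (an out-of-range index) is returned on
+ * error and is rejected by i915_gem_context_get_engine().
+ */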
+static unsigned int
+eb_select_legacy_ring(struct i915_execbuffer *eb,
+                     struct drm_file *file,
+                     struct drm_i915_gem_execbuffer2 *args)
 {
        struct drm_i915_private *i915 = eb->i915;
        unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
-       struct intel_engine_cs *engine;
-
-       if (user_ring_id > I915_USER_RINGS) {
-               DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
-               return -EINVAL;
-       }
 
-       if ((user_ring_id != I915_EXEC_BSD) &&
-           ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+       if (user_ring_id != I915_EXEC_BSD &&
+           (args->flags & I915_EXEC_BSD_MASK)) {
                DRM_DEBUG("execbuf with non bsd ring but with invalid "
                          "bsd dispatch flags: %d\n", (int)(args->flags));
-               return -EINVAL;
+               return -1;
        }
 
        if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
                } else {
                        DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
                                  bsd_idx);
-                       return -EINVAL;
+                       return -1;
                }
 
-               engine = i915->engine[_VCS(bsd_idx)];
-       } else {
-               engine = i915->engine[user_ring_map[user_ring_id]];
+               return _VCS(bsd_idx);
        }
 
-       if (!engine) {
-               DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
-               return -EINVAL;
+       if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
+               DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+               return -1;
        }
 
-       return eb_pin_context(eb, engine);
+       return user_ring_map[user_ring_id];
+}
+
+static int
+eb_select_engine(struct i915_execbuffer *eb,
+                struct drm_file *file,
+                struct drm_i915_gem_execbuffer2 *args)
+{
+       struct intel_context *ce;
+       unsigned int idx;
+       int err;
+
+       idx = eb_select_legacy_ring(eb, file, args);
+
+       ce = i915_gem_context_get_engine(eb->gem_context, idx);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
+       err = eb_pin_context(eb, ce);
+       intel_context_put(ce);
+
+       return err;
 }
 
 static void
 
 static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
                                            struct i915_gem_context *ctx)
 {
-       struct intel_engine_cs *engine = i915->engine[RCS0];
+       struct i915_gem_engines_iter it;
        struct intel_context *ce;
        int err;
 
-       ce = intel_context_instance(ctx, engine);
-       if (IS_ERR(ce))
-               return ce;
-
        err = i915_mutex_lock_interruptible(&i915->drm);
-       if (err) {
-               intel_context_put(ce);
+       if (err)
                return ERR_PTR(err);
+
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               if (ce->engine->class != RENDER_CLASS)
+                       continue;
+
+               /*
+                * As the ID is the gtt offset of the context's vma we
+                * pin the vma to ensure the ID remains fixed.
+                */
+               err = intel_context_pin(ce);
+               if (err == 0) {
+                       i915->perf.oa.pinned_ctx = ce;
+                       break;
+               }
        }
+       i915_gem_context_unlock_engines(ctx);
 
-       /*
-        * As the ID is the gtt offset of the context's vma we
-        * pin the vma to ensure the ID remains fixed.
-        *
-        * NB: implied RCS engine...
-        */
-       err = intel_context_pin(ce);
        mutex_unlock(&i915->drm.struct_mutex);
-       intel_context_put(ce);
        if (err)
                return ERR_PTR(err);
 
-       i915->perf.oa.pinned_ctx = ce;
-
-       return ce;
+       return i915->perf.oa.pinned_ctx;
 }
 
 /**
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
                                       const struct i915_oa_config *oa_config)
 {
-       struct intel_engine_cs *engine = dev_priv->engine[RCS0];
        unsigned int map_type = i915_coherent_map_type(dev_priv);
        struct i915_gem_context *ctx;
        struct i915_request *rq;
 
        /* Update all contexts now that we've stalled the submission. */
        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-               struct intel_context *ce = intel_context_lookup(ctx, engine);
-               u32 *regs;
-
-               /* OA settings will be set upon first use */
-               if (!ce || !ce->state)
-                       continue;
-
-               regs = i915_gem_object_pin_map(ce->state->obj, map_type);
-               if (IS_ERR(regs))
-                       return PTR_ERR(regs);
+               struct i915_gem_engines_iter it;
+               struct intel_context *ce;
+
+               for_each_gem_engine(ce,
+                                   i915_gem_context_lock_engines(ctx),
+                                   it) {
+                       u32 *regs;
+
+                       if (ce->engine->class != RENDER_CLASS)
+                               continue;
+
+                       /* OA settings will be set upon first use */
+                       if (!ce->state)
+                               continue;
+
+                       regs = i915_gem_object_pin_map(ce->state->obj,
+                                                      map_type);
+                       if (IS_ERR(regs)) {
+                               i915_gem_context_unlock_engines(ctx);
+                               return PTR_ERR(regs);
+                       }
 
-               ce->state->obj->mm.dirty = true;
-               regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+                       ce->state->obj->mm.dirty = true;
+                       regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
 
-               gen8_update_reg_state_unlocked(ce, regs, oa_config);
+                       gen8_update_reg_state_unlocked(ce, regs, oa_config);
 
-               i915_gem_object_unpin_map(ce->state->obj);
+                       i915_gem_object_unpin_map(ce->state->obj);
+               }
+               i915_gem_context_unlock_engines(ctx);
        }
 
        /*
         * Apply the configuration by doing one context restore of the edited
         * context image.
         */
-       rq = i915_request_create(engine->kernel_context);
+       rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
 
        struct drm_i915_private *i915 = engine->i915;
        struct intel_context *ce;
        struct i915_request *rq;
-       int err;
 
        /*
         * Preempt contexts are reserved for exclusive use to inject a
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
-       ce = intel_context_instance(ctx, engine);
+       ce = i915_gem_context_get_engine(ctx, engine->id);
        if (IS_ERR(ce))
                return ERR_CAST(ce);
 
-       err = intel_context_pin(ce);
-       if (err) {
-               rq = ERR_PTR(err);
-               goto err_put;
-       }
-
-       rq = i915_request_create(ce);
-       intel_context_unpin(ce);
-
-err_put:
+       rq = intel_context_create_request(ce);
        intel_context_put(ce);
+
        return rq;
 }
 
 
 static void guc_stage_desc_init(struct intel_guc_client *client)
 {
        struct intel_guc *guc = client->guc;
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct intel_engine_cs *engine;
        struct i915_gem_context *ctx = client->owner;
+       struct i915_gem_engines_iter it;
        struct guc_stage_desc *desc;
-       unsigned int tmp;
+       struct intel_context *ce;
        u32 gfx_addr;
 
        desc = __get_stage_desc(client);
        desc->priority = client->priority;
        desc->db_id = client->doorbell_id;
 
-       for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-               struct intel_context *ce = intel_context_lookup(ctx, engine);
-               u32 guc_engine_id = engine->guc_id;
-               struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               struct guc_execlist_context *lrc;
+
+               if (!(ce->engine->mask & client->engines))
+                       continue;
 
                /* TODO: We have a design issue to be solved here. Only when we
                 * receive the first batch, we know which engine is used by the
                 * for now who owns a GuC client. But for future owner of GuC
                 * client, need to make sure lrc is pinned prior to enter here.
                 */
-               if (!ce || !ce->state)
+               if (!ce->state)
                        break;  /* XXX: continue? */
 
                /*
                 * Instead, the GuC uses the LRCA of the user mode context (see
                 * guc_add_request below).
                 */
+               lrc = &desc->lrc[ce->engine->guc_id];
                lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
                /* The state page is after PPHWSP */
                 * here. In proxy submission, it wants the stage id
                 */
                lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
-                               (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
+                               (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
                lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
                lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
                lrc->ring_next_free_location = lrc->ring_begin;
                lrc->ring_current_tail_pointer_value = 0;
 
-               desc->engines_used |= (1 << guc_engine_id);
+               desc->engines_used |= BIT(ce->engine->guc_id);
        }
+       i915_gem_context_unlock_engines(ctx);
 
        DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
                         client->engines, desc->engines_used);
 
 
        wakeref = intel_runtime_pm_get(i915);
 
-       ce = intel_context_instance(ctx, i915->engine[RCS0]);
+       ce = i915_gem_context_get_engine(ctx, RCS0);
        if (IS_ERR(ce)) {
                ret = PTR_ERR(ce);
                goto out_rpm;
 
             const char *name)
 {
        struct i915_gem_context *ctx;
+       struct i915_gem_engines *e;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->link);
        ctx->i915 = i915;
 
-       ctx->hw_contexts = RB_ROOT;
-       spin_lock_init(&ctx->hw_contexts_lock);
+       mutex_init(&ctx->engines_mutex);
+       e = default_engines(ctx);
+       if (IS_ERR(e))
+               goto err_free;
+       RCU_INIT_POINTER(ctx->engines, e);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
 
        ret = i915_gem_context_pin_hw_id(ctx);
        if (ret < 0)
-               goto err_handles;
+               goto err_engines;
 
        if (name) {
                struct i915_hw_ppgtt *ppgtt;
 
        return ctx;
 
-err_handles:
+err_engines:
+       free_engines(rcu_access_pointer(ctx->engines));
+err_free:
        kfree(ctx);
        return NULL;