} while (1);
 }
 
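+/*
+ * Bind a GEM context to a freshly created intel_context: transfer the
+ * user's vm, timeline and scheduling parameters onto the HW context.
+ */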
+static void intel_context_set_gem(struct intel_context *ce,
+                                 struct i915_gem_context *ctx)
+{
+       GEM_BUG_ON(ce->gem_context);
+       ce->gem_context = ctx;
+
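+       /*
+        * The engine default is now a minimal 4K ring; give user contexts
+        * a roomier 16K ring unless the ring has already been allocated.
+        */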
+       if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+               ce->ring = __intel_context_ring_size(SZ_16K);
+
+       if (rcu_access_pointer(ctx->vm)) {
+               struct i915_address_space *vm;
+
+               rcu_read_lock();
+               vm = context_get_vm_rcu(ctx); /* acquire a vm reference under RCU */
+               rcu_read_unlock();
+
+               i915_vm_put(ce->vm);
+               ce->vm = vm;
+       }
+
+       GEM_BUG_ON(ce->timeline);
+       if (ctx->timeline)
+               ce->timeline = intel_timeline_get(ctx->timeline);
+
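+       /* Only contexts of at least normal priority may busy-wait on semaphores */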
+       if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+           intel_engine_has_semaphores(ce->engine))
+               __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
        while (count--) {
                GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
                GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-               ce = intel_context_create(ctx, engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        __free_engines(e, e->num_engines + 1);
                        return ERR_CAST(ce);
                }
 
+               intel_context_set_gem(ce, ctx);
+
                e->engines[engine->legacy_idx] = ce;
                e->num_engines = max(e->num_engines, engine->legacy_idx);
        }
        return ctx;
 }
 
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
-       struct i915_gem_context *ctx;
-
-       /* Keep the context ref so that we can free it immediately ourselves */
-       ctx = i915_gem_context_get(fetch_and_zero(ctxp));
-       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-       context_close(ctx);
-       i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
-       struct i915_gem_context *ctx;
-
-       ctx = i915_gem_create_context(i915, 0);
-       if (IS_ERR(ctx))
-               return ctx;
-
-       i915_gem_context_clear_bannable(ctx);
-       i915_gem_context_set_persistence(ctx);
-       ctx->sched.priority = I915_USER_PRIORITY(prio);
-
-       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-       return ctx;
-}
-
 static void init_contexts(struct i915_gem_contexts *gc)
 {
        spin_lock_init(&gc->lock);
        init_llist_head(&gc->free_list);
 }
 
-int i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init__contexts(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx;
-
-       /* Reassure ourselves we are only called once */
-       GEM_BUG_ON(i915->kernel_context);
-
        init_contexts(&i915->gem.contexts);
-
-       /* lowest priority; idle task */
-       ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
-       if (IS_ERR(ctx)) {
-               DRM_ERROR("Failed to create default global context\n");
-               return PTR_ERR(ctx);
-       }
-       i915->kernel_context = ctx;
-
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(i915)->has_logical_contexts ?
                         "logical" : "fake");
-       return 0;
 }
 
 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 {
-       destroy_kernel_context(&i915->kernel_context);
        flush_work(&i915->gem.contexts.free_work);
 }
 
        if (err < 0)
                goto err_ctx;
 
-       GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
        GEM_BUG_ON(err > 0);
 
        return 0;
                }
        }
 
-       ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+       ce = intel_execlists_create_virtual(siblings, n);
        if (IS_ERR(ce)) {
                err = PTR_ERR(ce);
                goto out_siblings;
        }
 
+       intel_context_set_gem(ce, set->ctx);
+
        if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
                intel_context_put(ce);
                err = -EEXIST;
                        return -ENOENT;
                }
 
-               ce = intel_context_create(ctx, engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        __free_engines(set.engines, n);
                        return PTR_ERR(ce);
                }
 
+               intel_context_set_gem(ce, ctx);
+
                set.engines->engines[n] = ce;
        }
        set.engines->num_engines = num_engines;
                 */
                if (intel_engine_is_virtual(engine))
                        clone->engines[n] =
-                               intel_execlists_clone_virtual(dst, engine);
+                               intel_execlists_clone_virtual(engine);
                else
-                       clone->engines[n] = intel_context_create(dst, engine);
+                       clone->engines[n] = intel_context_create(engine);
                if (IS_ERR_OR_NULL(clone->engines[n])) {
                        __free_engines(clone, n);
                        goto err_unlock;
                }
+
+               intel_context_set_gem(clone->engines[n], dst);
        }
        clone->num_engines = n;
 
 
        clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
 }
 
-static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
-{
-       return !ctx->file_priv;
-}
-
 /* i915_gem_context.c */
-int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_init__contexts(struct drm_i915_private *i915);
 void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file);
 
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
 static inline struct i915_gem_context *
 i915_gem_context_get(struct i915_gem_context *ctx)
 {
 
                return -EFAULT;
 
        if (args->flags & I915_USERPTR_READ_ONLY) {
-               struct i915_address_space *vm;
-
                /*
                 * On almost all of the older hw, we cannot tell the GPU that
                 * a page is readonly.
                 */
-               vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
-                                              true); /* static vm */
-               if (!vm || !vm->has_read_only)
+               if (!dev_priv->gt.vm->has_read_only)
                        return -ENODEV;
        }
 
 
                        if (!data[m].ce[0])
                                continue;
 
-                       ce = intel_context_create(ctx, data[m].ce[0]->engine);
+                       ce = intel_context_create(data[m].ce[0]->engine);
                        if (IS_ERR(ce))
                                goto out;
 
                        hweight32(engine->sseu.slice_mask),
                        hweight32(pg_sseu.slice_mask));
 
-               ce = intel_context_create(engine->kernel_context->gem_context,
-                                         engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        goto out_put;
 
 struct i915_gem_context *
 kernel_context(struct drm_i915_private *i915)
 {
-       return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+       struct i915_gem_context *ctx;
+
+       ctx = i915_gem_create_context(i915, 0);
+       if (IS_ERR(ctx))
+               return ctx;
+
+       i915_gem_context_clear_bannable(ctx);
+       i915_gem_context_set_persistence(ctx);
+
+       return ctx;
 }
 
 void kernel_context_close(struct i915_gem_context *ctx)
 
 }
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-                    struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
 {
        struct intel_context *ce;
 
        if (!ce)
                return ERR_PTR(-ENOMEM);
 
-       intel_context_init(ce, ctx, engine);
+       intel_context_init(ce, engine);
        return ce;
 }
 
                CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
                         ce->ring->head, ce->ring->tail);
 
-               i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
-
                smp_mb__before_atomic(); /* flush pin before it is visible */
        }
 
 
                ce->ops->unpin(ce);
 
-               i915_gem_context_put(ce->gem_context);
                intel_context_active_release(ce);
        }
 
                return err;
 
        /* Preallocate tracking nodes */
-       if (!i915_gem_context_is_kernel(ce->gem_context)) {
+       if (!intel_context_is_barrier(ce)) {
                err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                              ce->engine);
                if (err) {
 
 void
 intel_context_init(struct intel_context *ce,
-                  struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
 {
-       struct i915_address_space *vm;
-
        GEM_BUG_ON(!engine->cops);
+       GEM_BUG_ON(!engine->gt->vm);
 
        kref_init(&ce->ref);
 
-       ce->gem_context = ctx;
-       rcu_read_lock();
-       vm = rcu_dereference(ctx->vm);
-       if (vm)
-               ce->vm = i915_vm_get(vm);
-       else
-               ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
-       rcu_read_unlock();
-       if (ctx->timeline)
-               ce->timeline = intel_timeline_get(ctx->timeline);
-       if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
-           intel_engine_has_semaphores(engine))
-               __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
-
        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
-       ce->ring = __intel_context_ring_size(SZ_16K);
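+       /* A minimal default; intel_context_set_gem() may enlarge it for users */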
+       ce->ring = __intel_context_ring_size(SZ_4K);
+
+       ce->vm = i915_vm_get(engine->gt->vm);
 
        INIT_LIST_HEAD(&ce->signal_link);
        INIT_LIST_HEAD(&ce->signals);
 
 } while (0)
 
 void intel_context_init(struct intel_context *ce,
-                       struct i915_gem_context *ctx,
                        struct intel_engine_cs *engine);
 void intel_context_fini(struct intel_context *ce);
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-                    struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
 
 void intel_context_free(struct intel_context *ce);
 
        return u64_to_ptr(struct intel_ring, sz);
 }
 
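+/*
+ * A barrier context is an engine's perma-pinned kernel context, created
+ * without a backing GEM context.
+ */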
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+       return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
 static inline bool intel_context_use_semaphores(const struct intel_context *ce)
 {
        return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 
        struct intel_timeline *timeline;
 
        unsigned long flags;
-#define CONTEXT_ALLOC_BIT              0
-#define CONTEXT_VALID_BIT              1
-#define CONTEXT_USE_SEMAPHORES         2
-#define CONTEXT_BANNED                 3
-#define CONTEXT_FORCE_SINGLE_SUBMISSION        4
-#define CONTEXT_NOPREEMPT              5
+#define CONTEXT_BARRIER_BIT            0
+#define CONTEXT_ALLOC_BIT              1
+#define CONTEXT_VALID_BIT              2
+#define CONTEXT_USE_SEMAPHORES         3
+#define CONTEXT_BANNED                 4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION        5
+#define CONTEXT_NOPREEMPT              6
 
        u32 *lrc_reg_state;
        u64 lrc_desc;
 
        struct intel_context *ce;
        int err;
 
-       ce = intel_context_create(engine->i915->kernel_context, engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return ce;
 
-       ce->ring = __intel_context_ring_size(SZ_4K);
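+       /* Identify this as the engine's barrier (kernel) context */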
+       __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
 
-       err = intel_context_pin(ce);
+       err = intel_context_pin(ce); /* perma-pin so it is always available */
        if (err) {
                intel_context_put(ce);
                return ERR_PTR(err);
 
        engine->set_default_submission(engine);
 
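+       /*
+        * Measure the breadcrumb size first so that a failure requires no
+        * unwinding of the perma-pinned kernel context below.
+        */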
+       ret = measure_breadcrumb_dw(engine);
+       if (ret < 0)
+               return ret;
+
+       engine->emit_fini_breadcrumb_dw = ret;
+
        /*
         * We may need to do things with the shrinker which
         * require us to immediately switch back to the default
 
        engine->kernel_context = ce;
 
-       ret = measure_breadcrumb_dw(engine);
-       if (ret < 0)
-               goto err_unpin;
-
-       engine->emit_fini_breadcrumb_dw = ret;
-
        return 0;
-
-err_unpin:
-       intel_context_unpin(ce);
-       intel_context_put(ce);
-       return ret;
 }
 
 /**
 
 
 #include "i915_drv.h"
 
+#include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
        unsigned long flags;
        bool result = true;
 
+       GEM_BUG_ON(!intel_context_is_barrier(ce));
+
        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
                return true;
 
        struct intel_uncore *uncore = gt->uncore;
        int ret;
 
-       BUG_ON(!i915->kernel_context);
        ret = intel_gt_terminally_wedged(gt);
        if (ret)
                return ret;
        i915_vma_unpin_and_release(&gt->scratch, 0);
 }
 
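+/*
+ * Select the GT-wide vm for the kernel contexts: a full ppGTT where the
+ * hardware supports one, otherwise the global GTT. Returns NULL on
+ * allocation failure, which the caller reports as -ENOMEM.
+ */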
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+       if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) {
+               struct i915_ppgtt *ppgtt;
+
+               ppgtt = i915_ppgtt_create(gt->i915);
+               if (IS_ERR(ppgtt))
+                       return NULL;
+
+               return &ppgtt->vm;
+       }
+
+       return i915_vm_get(&gt->ggtt->vm);
+}
+
 int intel_gt_init(struct intel_gt *gt)
 {
        int err;
 
        intel_gt_pm_init(gt);
 
+       gt->vm = kernel_vm(gt);
+       if (!gt->vm) {
+               err = -ENOMEM;
+               goto err_scratch;
+       }
+
        return 0;
+
+err_scratch:
+       intel_gt_fini_scratch(gt);
+       return err;
 }
 
 void intel_gt_driver_remove(struct intel_gt *gt)
 
 void intel_gt_driver_release(struct intel_gt *gt)
 {
+       struct i915_address_space *vm;
+
+       vm = fetch_and_zero(&gt->vm);
+       if (vm) /* FIXME being called twice on error paths :( */
+               i915_vm_put(vm);
+
        intel_gt_pm_fini(gt);
        intel_gt_fini_scratch(gt);
 }
 
 
 #include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       for_each_engine(engine, gt, id)
+       for_each_engine(engine, gt, id) {
                intel_engine_flush_submission(engine);
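+               /* and wait for any retirement queued to the background worker */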
+               flush_work(&engine->retire_work);
+       }
 }
 
 static void engine_retire(struct work_struct *work)
 
        spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-               if (!mutex_trylock(&tl->mutex)) {
-                       active_count++; /* report busy to caller, try again? */
+               active_count++; /* report busy to caller, try again? */
+               if (!mutex_trylock(&tl->mutex))
                        continue;
-               }
 
                intel_timeline_get(tl);
                GEM_BUG_ON(!atomic_read(&tl->active_count));
 
                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
-               if (atomic_dec_and_test(&tl->active_count))
+               if (atomic_dec_and_test(&tl->active_count)) {
                        list_del(&tl->link);
-               else
-                       active_count += !!rcu_access_pointer(tl->last_request.fence);
+                       active_count--;
+               }
 
                mutex_unlock(&tl->mutex);
 
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
 
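+       /* flush engine submission and background retirement before returning */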
+       flush_submission(gt);
+
        return active_count ? timeout : 0;
 }
 
 
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
        struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
                                            [MAX_ENGINE_INSTANCE + 1];
+
+       /*
+        * Default address space (either GGTT or ppGTT depending on arch).
+        *
+        * Reserved for exclusive use by the kernel.
+        */
+       struct i915_address_space *vm;
 };
 
 enum intel_gt_scratch_field {
 
  */
 #include <linux/interrupt.h>
 
-#include "gem/i915_gem_context.h"
-
 #include "i915_drv.h"
 #include "i915_perf.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
                if (i915_request_completed(rq))
                        goto unlock;
 
-               if (i915_active_is_idle(&ce->active) && ce->gem_context) {
+               if (i915_active_is_idle(&ce->active) &&
+                   !intel_context_is_barrier(ce)) {
                        GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
                                      ce->timeline->fence_context,
                                      port - execlists->pending);
 }
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-                              struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
                               unsigned int count)
 {
        struct virtual_engine *ve;
                return ERR_PTR(-EINVAL);
 
        if (count == 1)
-               return intel_context_create(ctx, siblings[0]);
+               return intel_context_create(siblings[0]);
 
        ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
        if (!ve)
                return ERR_PTR(-ENOMEM);
 
-       ve->base.i915 = ctx->i915;
+       ve->base.i915 = siblings[0]->i915;
        ve->base.gt = siblings[0]->gt;
        ve->base.uncore = siblings[0]->uncore;
        ve->base.id = -1;
                     virtual_submission_tasklet,
                     (unsigned long)ve);
 
-       intel_context_init(&ve->context, ctx, &ve->base);
+       intel_context_init(&ve->context, &ve->base);
 
        for (n = 0; n < count; n++) {
                struct intel_engine_cs *sibling = siblings[n];
 }
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-                             struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
 {
        struct virtual_engine *se = to_virtual_engine(src);
        struct intel_context *dst;
 
-       dst = intel_execlists_create_virtual(ctx,
-                                            se->siblings,
+       dst = intel_execlists_create_virtual(se->siblings,
                                             se->num_siblings);
        if (IS_ERR(dst))
                return dst;
 
                                   unsigned int max);
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-                              struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
                               unsigned int count);
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-                             struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
 
 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
                                     const struct intel_engine_cs *master,
 
                        i915_request_skip(rq, -EIO);
 }
 
-static void client_mark_guilty(struct i915_request *rq, bool banned)
+static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 {
-       struct i915_gem_context *ctx = rq->context->gem_context;
        struct drm_i915_file_private *file_priv = ctx->file_priv;
        unsigned long prev_hang;
        unsigned int score;
 
 static bool mark_guilty(struct i915_request *rq)
 {
-       struct i915_gem_context *ctx = rq->context->gem_context;
+       struct i915_gem_context *ctx;
        unsigned long prev_hang;
        bool banned;
        int i;
 
+       ctx = rq->context->gem_context;
+       if (!ctx)
+               return false;
+
        if (i915_gem_context_is_closed(ctx)) {
                intel_context_set_banned(rq->context);
                return true;
                intel_context_set_banned(rq->context);
        }
 
-       client_mark_guilty(rq, banned);
+       client_mark_guilty(ctx, banned);
 
        return banned;
 }
 
 static void mark_innocent(struct i915_request *rq)
 {
-       atomic_inc(&rq->context->gem_context->active_count);
+       if (rq->context->gem_context)
+               atomic_inc(&rq->context->gem_context->active_count);
 }
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 
        struct i915_gem_context *ctx = rq->context->gem_context;
        int i, err;
 
-       if (!ctx->remap_slice)
+       if (!ctx || !ctx->remap_slice)
                return 0;
 
        for (i = 0; i < MAX_L3_SLICES; i++) {
 
        return err;
 }
 
-static int __live_context_size(struct intel_engine_cs *engine,
-                              struct i915_gem_context *fixme)
+static int __live_context_size(struct intel_engine_cs *engine)
 {
        struct intel_context *ce;
        struct i915_request *rq;
        void *vaddr;
        int err;
 
-       ce = intel_context_create(fixme, engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 {
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *fixme;
        enum intel_engine_id id;
        int err = 0;
 
         * HW tries to write past the end of one.
         */
 
-       fixme = kernel_context(gt->i915);
-       if (IS_ERR(fixme))
-               return PTR_ERR(fixme);
-
        for_each_engine(engine, gt, id) {
                struct {
                        struct drm_i915_gem_object *state;
                /* Overlaps with the execlists redzone */
                engine->context_size += I915_GTT_PAGE_SIZE;
 
-               err = __live_context_size(engine, fixme);
+               err = __live_context_size(engine);
 
                engine->context_size -= I915_GTT_PAGE_SIZE;
 
                        break;
        }
 
-       kernel_context_close(fixme);
        return err;
 }
 
-static int __live_active_context(struct intel_engine_cs *engine,
-                                struct i915_gem_context *fixme)
+static int __live_active_context(struct intel_engine_cs *engine)
 {
        unsigned long saved_heartbeat;
        struct intel_context *ce;
                return -EINVAL;
        }
 
-       ce = intel_context_create(fixme, engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 {
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *fixme;
        enum intel_engine_id id;
-       struct file *file;
        int err = 0;
 
-       file = mock_file(gt->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       fixme = live_context(gt->i915, file);
-       if (IS_ERR(fixme)) {
-               err = PTR_ERR(fixme);
-               goto out_file;
-       }
-
        for_each_engine(engine, gt, id) {
-               err = __live_active_context(engine, fixme);
+               err = __live_active_context(engine);
                if (err)
                        break;
 
                        break;
        }
 
-out_file:
-       fput(file);
        return err;
 }
 
        return err;
 }
 
-static int __live_remote_context(struct intel_engine_cs *engine,
-                                struct i915_gem_context *fixme)
+static int __live_remote_context(struct intel_engine_cs *engine)
 {
        struct intel_context *local, *remote;
        unsigned long saved_heartbeat;
                return -EINVAL;
        }
 
-       remote = intel_context_create(fixme, engine);
+       remote = intel_context_create(engine);
        if (IS_ERR(remote))
                return PTR_ERR(remote);
 
-       local = intel_context_create(fixme, engine);
+       local = intel_context_create(engine);
        if (IS_ERR(local)) {
                err = PTR_ERR(local);
                goto err_remote;
 {
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *fixme;
        enum intel_engine_id id;
-       struct file *file;
        int err = 0;
 
-       file = mock_file(gt->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       fixme = live_context(gt->i915, file);
-       if (IS_ERR(fixme)) {
-               err = PTR_ERR(fixme);
-               goto out_file;
-       }
-
        for_each_engine(engine, gt, id) {
-               err = __live_remote_context(engine, fixme);
+               err = __live_remote_context(engine);
                if (err)
                        break;
 
                        break;
        }
 
-out_file:
-       fput(file);
        return err;
 }
 
 
        int err;
        int i;
 
-       ce = intel_context_create(engine->kernel_context->gem_context,
-                                 engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 
 #include <linux/kthread.h>
 
 #include "gem/i915_gem_context.h"
-#include "gt/intel_gt.h"
+
+#include "intel_gt.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 
 #include "i915_selftest.h"
                          1000));
 }
 
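+/*
+ * The heartbeat may emit requests behind the test's back; hold a pm wakeref
+ * and park the heartbeat while the test owns the engine.
+ */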
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+                                    unsigned long *saved)
+{
+       *saved = engine->props.heartbeat_interval_ms;
+       engine->props.heartbeat_interval_ms = 0;
+
+       intel_engine_pm_get(engine);
+       intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+                                   unsigned long saved)
+{
+       intel_engine_pm_put(engine);
+
+       engine->props.heartbeat_interval_ms = saved;
+}
+
 static int igt_hang_sanitycheck(void *arg)
 {
        struct intel_gt *gt = arg;
        struct intel_gt *gt = arg;
        struct i915_gpu_error *global = &gt->i915->gpu_error;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
        unsigned int reset_count, count;
        enum intel_engine_id id;
        IGT_TIMEOUT(end_time);
-       struct file *file;
        int err = 0;
 
        /* Check that we can reset during non-user portions of requests */
 
-       file = mock_file(gt->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       ctx = live_context(gt->i915, file);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto out;
-       }
-
-       i915_gem_context_clear_bannable(ctx);
        reset_count = i915_reset_count(global);
        count = 0;
        do {
                for_each_engine(engine, gt, id) {
+                       struct intel_context *ce;
                        int i;
 
+                       ce = intel_context_create(engine);
+                       if (IS_ERR(ce)) {
+                               err = PTR_ERR(ce);
+                               break;
+                       }
+
                        for (i = 0; i < 16; i++) {
                                struct i915_request *rq;
 
-                               rq = igt_request_alloc(ctx, engine);
+                               rq = intel_context_create_request(ce);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
                                        break;
 
                                i915_request_add(rq);
                        }
+
+                       intel_context_put(ce);
                }
 
                igt_global_reset_lock(gt);
        } while (time_before(jiffies, end_time));
        pr_info("%s: %d resets\n", __func__, count);
 
-       err = igt_flush_test(gt->i915);
-out:
-       fput(file);
-       if (intel_gt_is_wedged(gt))
+       if (igt_flush_test(gt->i915))
                err = -EIO;
        return err;
 }
        struct intel_gt *gt = arg;
        struct i915_gpu_error *global = &gt->i915->gpu_error;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
        enum intel_engine_id id;
-       struct file *file;
-       int err = 0;
 
        /* Check that we can engine-reset during non-user portions */
 
        if (!intel_has_reset_engine(gt))
                return 0;
 
-       file = mock_file(gt->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       ctx = live_context(gt->i915, file);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto out;
-       }
-
-       i915_gem_context_clear_bannable(ctx);
        for_each_engine(engine, gt, id) {
-               unsigned int reset_count, reset_engine_count;
-               unsigned int count;
+               unsigned int reset_count, reset_engine_count, count;
+               struct intel_context *ce;
+               unsigned long heartbeat;
                IGT_TIMEOUT(end_time);
+               int err;
+
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce))
+                       return PTR_ERR(ce);
 
                reset_count = i915_reset_count(global);
                reset_engine_count = i915_reset_engine_count(global, engine);
                count = 0;
 
+               engine_heartbeat_disable(engine, &heartbeat);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        int i;
                        for (i = 0; i < 16; i++) {
                                struct i915_request *rq;
 
-                               rq = igt_request_alloc(ctx, engine);
+                               rq = intel_context_create_request(ce);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
                                        break;
                        }
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+               engine_heartbeat_enable(engine, heartbeat);
 
-               if (err)
-                       break;
+               pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
 
-               err = igt_flush_test(gt->i915);
+               intel_context_put(ce);
+               if (igt_flush_test(gt->i915))
+                       err = -EIO;
                if (err)
-                       break;
+                       return err;
        }
 
-       err = igt_flush_test(gt->i915);
-out:
-       fput(file);
-       if (intel_gt_is_wedged(gt))
-               err = -EIO;
-       return err;
+       return 0;
 }
 
 static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
        for_each_engine(engine, gt, id) {
                unsigned int reset_count, reset_engine_count;
+               unsigned long heartbeat;
                IGT_TIMEOUT(end_time);
 
                if (active && !intel_engine_can_store_dword(engine))
                reset_count = i915_reset_count(global);
                reset_engine_count = i915_reset_engine_count(global, engine);
 
-               intel_engine_pm_get(engine);
+               engine_heartbeat_disable(engine, &heartbeat);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        if (active) {
                        }
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               intel_engine_pm_put(engine);
+               engine_heartbeat_enable(engine, heartbeat);
 
                if (err)
                        break;
        struct active_engine *arg = data;
        struct intel_engine_cs *engine = arg->engine;
        struct i915_request *rq[8] = {};
-       struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
-       unsigned long count = 0;
-       struct file *file;
+       struct intel_context *ce[ARRAY_SIZE(rq)];
+       unsigned long count;
        int err = 0;
 
-       file = mock_file(engine->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       for (count = 0; count < ARRAY_SIZE(ctx); count++) {
-               ctx[count] = live_context(engine->i915, file);
-               if (IS_ERR(ctx[count])) {
-                       err = PTR_ERR(ctx[count]);
+       for (count = 0; count < ARRAY_SIZE(ce); count++) {
+               ce[count] = intel_context_create(engine);
+               if (IS_ERR(ce[count])) {
+                       err = PTR_ERR(ce[count]);
-                       while (--count)
-                               i915_gem_context_put(ctx[count]);
-                       goto err_file;
+                       while (count--)
+                               intel_context_put(ce[count]);
+                       return err;
                }
        }
 
+       count = 0;
        while (!kthread_should_stop()) {
                unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
                struct i915_request *old = rq[idx];
                struct i915_request *new;
 
-               new = igt_request_alloc(ctx[idx], engine);
+               new = intel_context_create_request(ce[idx]);
                if (IS_ERR(new)) {
                        err = PTR_ERR(new);
                        break;
                }
 
-               if (arg->flags & TEST_PRIORITY)
-                       ctx[idx]->sched.priority =
-                               i915_prandom_u32_max_state(512, &prng);
-
                rq[idx] = i915_request_get(new);
                i915_request_add(new);
 
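+               /*
+                * There is no GEM context to inherit a priority from, so
+                * apply a random priority directly to each request.
+                */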
+               if (engine->schedule && arg->flags & TEST_PRIORITY) {
+                       struct i915_sched_attr attr = {
+                               .priority =
+                                       i915_prandom_u32_max_state(512, &prng),
+                       };
+                       engine->schedule(rq[idx], &attr);
+               }
+
                err = active_request_put(old);
                if (err)
                        break;
                /* Keep the first error */
                if (!err)
                        err = err__;
+
+               intel_context_put(ce[count]);
        }
 
-err_file:
-       fput(file);
        return err;
 }
 
                struct active_engine threads[I915_NUM_ENGINES] = {};
                unsigned long device = i915_reset_count(global);
                unsigned long count = 0, reported;
+               unsigned long heartbeat;
                IGT_TIMEOUT(end_time);
 
                if (flags & TEST_ACTIVE &&
 
                yield(); /* start all threads before we begin */
 
-               intel_engine_pm_get(engine);
+               engine_heartbeat_disable(engine, &heartbeat);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        struct i915_request *rq = NULL;
                        }
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               intel_engine_pm_put(engine);
+               engine_heartbeat_enable(engine, heartbeat);
+
                pr_info("i915_reset_engine(%s:%s): %lu resets\n",
                        engine->name, test_name, count);
 
 static int igt_reset_evict_ppgtt(void *arg)
 {
        struct intel_gt *gt = arg;
-       struct i915_gem_context *ctx;
-       struct i915_address_space *vm;
-       struct file *file;
+       struct i915_ppgtt *ppgtt;
        int err;
 
-       file = mock_file(gt->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
+       /* aliasing == global gtt locking, covered above */
+       if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
+               return 0;
 
-       ctx = live_context(gt->i915, file);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto out;
-       }
+       ppgtt = i915_ppgtt_create(gt->i915);
+       if (IS_ERR(ppgtt))
+               return PTR_ERR(ppgtt);
 
-       err = 0;
-       vm = i915_gem_context_get_vm_rcu(ctx);
-       if (!i915_is_ggtt(vm)) {
-               /* aliasing == global gtt locking, covered above */
-               err = __igt_reset_evict_vma(gt, vm,
-                                           evict_vma, EXEC_OBJECT_WRITE);
-       }
-       i915_vm_put(vm);
+       err = __igt_reset_evict_vma(gt, &ppgtt->vm,
+                                   evict_vma, EXEC_OBJECT_WRITE);
+       i915_vm_put(&ppgtt->vm);
 
-out:
-       fput(file);
        return err;
 }
 
 
        return vma;
 }
 
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+                                    unsigned long *saved)
+{
+       *saved = engine->props.heartbeat_interval_ms;
+       engine->props.heartbeat_interval_ms = 0;
+
+       intel_engine_pm_get(engine);
+       intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+                                   unsigned long saved)
+{
+       intel_engine_pm_put(engine);
+
+       engine->props.heartbeat_interval_ms = saved;
+}
+
 static int live_sanitycheck(void *arg)
 {
        struct intel_gt *gt = arg;
-       struct i915_gem_engines_iter it;
-       struct i915_gem_context *ctx;
-       struct intel_context *ce;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        struct igt_spinner spin;
-       int err = -ENOMEM;
+       int err = 0;
 
        if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
                return 0;
        if (igt_spinner_init(&spin, gt))
                return -ENOMEM;
 
-       ctx = kernel_context(gt->i915);
-       if (!ctx)
-               goto err_spin;
-
-       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+       for_each_engine(engine, gt, id) {
+               struct intel_context *ce;
                struct i915_request *rq;
 
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce)) {
+                       err = PTR_ERR(ce);
+                       break;
+               }
+
                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
-                       goto err_ctx;
+                       goto out_ctx;
                }
 
                i915_request_add(rq);
                        GEM_TRACE_DUMP();
                        intel_gt_set_wedged(gt);
                        err = -EIO;
-                       goto err_ctx;
+                       goto out_ctx;
                }
 
                igt_spinner_end(&spin);
                if (igt_flush_test(gt->i915)) {
                        err = -EIO;
-                       goto err_ctx;
+                       goto out_ctx;
                }
+
+out_ctx:
+               intel_context_put(ce);
+               if (err)
+                       break;
        }
 
-       err = 0;
-err_ctx:
-       i915_gem_context_unlock_engines(ctx);
-       kernel_context_close(ctx);
-err_spin:
        igt_spinner_fini(&spin);
        return err;
 }
 static int live_unlite_restore(struct intel_gt *gt, int prio)
 {
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct igt_spinner spin;
        int err = -ENOMEM;
        if (igt_spinner_init(&spin, gt))
                return err;
 
-       ctx = kernel_context(gt->i915);
-       if (!ctx)
-               goto err_spin;
-
        err = 0;
        for_each_engine(engine, gt, id) {
                struct intel_context *ce[2] = {};
                struct i915_request *rq[2];
                struct igt_live_test t;
+               unsigned long saved;
                int n;
 
                if (prio && !intel_engine_has_preemption(engine))
                        err = -EIO;
                        break;
                }
+               engine_heartbeat_disable(engine, &saved);
 
                for (n = 0; n < ARRAY_SIZE(ce); n++) {
                        struct intel_context *tmp;
 
-                       tmp = intel_context_create(ctx, engine);
+                       tmp = intel_context_create(engine);
                        if (IS_ERR(tmp)) {
                                err = PTR_ERR(tmp);
                                goto err_ce;
                        intel_context_put(ce[n]);
                }
 
+               engine_heartbeat_enable(engine, saved);
                if (igt_live_test_end(&t))
                        err = -EIO;
                if (err)
                        break;
        }
 
-       kernel_context_close(ctx);
-err_spin:
        igt_spinner_fini(&spin);
        return err;
 }
 static struct i915_request *
 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 {
-       struct i915_gem_context *ctx;
+       struct intel_context *ce;
        struct i915_request *rq;
        int err;
 
-       ctx = kernel_context(engine->i915);
-       if (!ctx)
-               return ERR_PTR(-ENOMEM);
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return ERR_CAST(ce);
 
-       rq = igt_request_alloc(ctx, engine);
+       rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
-               goto out_ctx;
+               goto out_ce;
 
        err = 0;
        if (rq->engine->emit_init_breadcrumb)
        if (err)
                rq = ERR_PTR(err);
 
-out_ctx:
-       kernel_context_close(ctx);
+out_ce:
+       intel_context_put(ce);
        return rq;
 }
 
                enum intel_engine_id id;
 
                for_each_engine(engine, gt, id) {
+                       unsigned long saved;
+
                        if (!intel_engine_has_preemption(engine))
                                continue;
 
                        memset(vaddr, 0, PAGE_SIZE);
 
+                       engine_heartbeat_disable(engine, &saved);
                        err = slice_semaphore_queue(engine, vma, count);
+                       engine_heartbeat_enable(engine, saved);
                        if (err)
                                goto err_pin;
 
                        .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
                };
                struct i915_request *rq, *nop;
+               unsigned long saved;
 
                if (!intel_engine_has_preemption(engine))
                        continue;
 
+               engine_heartbeat_disable(engine, &saved);
                memset(vaddr, 0, PAGE_SIZE);
 
                /* ELSP[0]: semaphore wait */
                rq = semaphore_queue(engine, vma, 0);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
-                       goto err_pin;
+                       goto err_heartbeat;
                }
                engine->schedule(rq, &attr);
                wait_for_submit(engine, rq);
                nop = nop_request(engine);
                if (IS_ERR(nop)) {
                        err = PTR_ERR(nop);
-                       i915_request_put(rq);
-                       goto err_pin;
+                       goto err_rq;
                }
                wait_for_submit(engine, nop);
                i915_request_put(nop);
 
                /* Queue: semaphore signal, matching priority as semaphore */
                err = release_queue(engine, vma, 1, effective_prio(rq));
-               if (err) {
-                       i915_request_put(rq);
-                       goto err_pin;
-               }
+               if (err)
+                       goto err_rq;
 
                intel_engine_flush_submission(engine);
                if (!READ_ONCE(engine->execlists.timer.expires) &&
                        memset(vaddr, 0xff, PAGE_SIZE);
                        err = -EIO;
                }
+err_rq:
                i915_request_put(rq);
+err_heartbeat:
+               engine_heartbeat_enable(engine, saved);
                if (err)
                        break;
        }
 
-err_pin:
        i915_vma_unpin(vma);
 err_map:
        i915_gem_object_unpin_map(obj);
        u32 *cs;
        int err;
 
-       ce = intel_context_create(engine->kernel_context->gem_context, engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 {
        IGT_TIMEOUT(end_time);
        struct i915_request *request[16] = {};
-       struct i915_gem_context *ctx[16];
        struct intel_context *ve[16];
        unsigned long n, prime, nc;
        struct igt_live_test t;
        ktime_t times[2] = {};
        int err;
 
-       GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+       GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
 
        for (n = 0; n < nctx; n++) {
-               ctx[n] = kernel_context(gt->i915);
-               if (!ctx[n]) {
-                       err = -ENOMEM;
-                       nctx = n;
-                       goto out;
-               }
-
-               ve[n] = intel_execlists_create_virtual(ctx[n],
-                                                      siblings, nsibling);
+               ve[n] = intel_execlists_create_virtual(siblings, nsibling);
                if (IS_ERR(ve[n])) {
-                       kernel_context_close(ctx[n]);
                        err = PTR_ERR(ve[n]);
                        nctx = n;
                        goto out;
                err = intel_context_pin(ve[n]);
                if (err) {
                        intel_context_put(ve[n]);
-                       kernel_context_close(ctx[n]);
                        nctx = n;
                        goto out;
                }
                i915_request_put(request[nc]);
                intel_context_unpin(ve[nc]);
                intel_context_put(ve[nc]);
-               kernel_context_close(ctx[nc]);
        }
        return err;
 }
                               unsigned int nsibling)
 {
        struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
-       struct i915_gem_context *ctx;
        struct intel_context *ve;
        struct igt_live_test t;
        unsigned int n;
         * restrict it to our desired engine within the virtual engine.
         */
 
-       ctx = kernel_context(gt->i915);
-       if (!ctx)
-               return -ENOMEM;
-
-       ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+       ve = intel_execlists_create_virtual(siblings, nsibling);
        if (IS_ERR(ve)) {
                err = PTR_ERR(ve);
                goto out_close;
 out_put:
        intel_context_put(ve);
 out_close:
-       kernel_context_close(ctx);
        return err;
 }
 
                                    unsigned int nsibling)
 {
        struct i915_request *last = NULL;
-       struct i915_gem_context *ctx;
        struct intel_context *ve;
        struct i915_vma *scratch;
        struct igt_live_test t;
        int err = 0;
        u32 *cs;
 
-       ctx = kernel_context(gt->i915);
-       if (!ctx)
-               return -ENOMEM;
-
        scratch = create_scratch(siblings[0]->gt);
-       if (IS_ERR(scratch)) {
-               err = PTR_ERR(scratch);
-               goto out_close;
-       }
+       if (IS_ERR(scratch))
+               return PTR_ERR(scratch);
 
-       ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+       ve = intel_execlists_create_virtual(siblings, nsibling);
        if (IS_ERR(ve)) {
                err = PTR_ERR(ve);
                goto out_scratch;
        intel_context_put(ve);
 out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-       kernel_context_close(ctx);
        return err;
 }
 
 #define BOND_SCHEDULE BIT(0)
 {
        struct intel_engine_cs *master;
-       struct i915_gem_context *ctx;
        struct i915_request *rq[16];
        enum intel_engine_id id;
        struct igt_spinner spin;
        if (igt_spinner_init(&spin, gt))
                return -ENOMEM;
 
-       ctx = kernel_context(gt->i915);
-       if (!ctx) {
-               err = -ENOMEM;
-               goto err_spin;
-       }
-
        err = 0;
        rq[0] = ERR_PTR(-ENOMEM);
        for_each_engine(master, gt, id) {
 
                memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
 
-               rq[0] = spinner_create_request(&spin, ctx, master, MI_NOOP);
+               rq[0] = igt_spinner_create_request(&spin,
+                                                  master->kernel_context,
+                                                  MI_NOOP);
                if (IS_ERR(rq[0])) {
                        err = PTR_ERR(rq[0]);
                        goto out;
                for (n = 0; n < nsibling; n++) {
                        struct intel_context *ve;
 
-                       ve = intel_execlists_create_virtual(ctx,
-                                                           siblings,
-                                                           nsibling);
+                       ve = intel_execlists_create_virtual(siblings, nsibling);
                        if (IS_ERR(ve)) {
                                err = PTR_ERR(ve);
                                onstack_fence_fini(&fence);
        if (igt_flush_test(gt->i915))
                err = -EIO;
 
-       kernel_context_close(ctx);
-err_spin:
        igt_spinner_fini(&spin);
        return err;
 }
        return err;
 }
 
-static int __live_lrc_state(struct i915_gem_context *fixme,
-                           struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
                            struct i915_vma *scratch)
 {
        struct intel_context *ce;
        int err;
        int n;
 
-       ce = intel_context_create(fixme, engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 {
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *fixme;
        struct i915_vma *scratch;
        enum intel_engine_id id;
        int err = 0;
         * intel_context.
         */
 
-       fixme = kernel_context(gt->i915);
-       if (!fixme)
-               return -ENOMEM;
-
        scratch = create_scratch(gt);
-       if (IS_ERR(scratch)) {
-               err = PTR_ERR(scratch);
-               goto out_close;
-       }
+       if (IS_ERR(scratch))
+               return PTR_ERR(scratch);
 
        for_each_engine(engine, gt, id) {
-               err = __live_lrc_state(fixme, engine, scratch);
+               err = __live_lrc_state(engine, scratch);
                if (err)
                        break;
        }
                err = -EIO;
 
        i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-       kernel_context_close(fixme);
        return err;
 }
 
        return 0;
 }
 
-static int __live_gpr_clear(struct i915_gem_context *fixme,
-                           struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
                            struct i915_vma *scratch)
 {
        struct intel_context *ce;
        if (err)
                return err;
 
-       ce = intel_context_create(fixme, engine);
+       ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
 {
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *fixme;
        struct i915_vma *scratch;
        enum intel_engine_id id;
        int err = 0;
         * to avoid leaking any information from previous contexts.
         */
 
-       fixme = kernel_context(gt->i915);
-       if (!fixme)
-               return -ENOMEM;
-
        scratch = create_scratch(gt);
-       if (IS_ERR(scratch)) {
-               err = PTR_ERR(scratch);
-               goto out_close;
-       }
+       if (IS_ERR(scratch))
+               return PTR_ERR(scratch);
 
        for_each_engine(engine, gt, id) {
-               err = __live_gpr_clear(fixme, engine, scratch);
+               err = __live_gpr_clear(engine, scratch);
                if (err)
                        break;
        }
                err = -EIO;
 
        i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-       kernel_context_close(fixme);
        return err;
 }
 
 
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
-               ce = intel_context_create(engine->kernel_context->gem_context,
-                                         engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        break;
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
-               ce = intel_context_create(engine->kernel_context->gem_context,
-                                         engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        break;
 
                        const u32 *res;
 
                        /* Use a sacrificial context */
-                       ce = intel_context_create(engine->kernel_context->gem_context,
-                                                 engine);
+                       ce = intel_context_create(engine);
                        if (IS_ERR(ce)) {
                                err = PTR_ERR(ce);
                                goto out;
 
 switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
 {
-       struct i915_gem_context *ctx;
        struct intel_context *ce;
        struct i915_request *rq;
        int err = 0;
 
-       ctx = kernel_context(engine->i915);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
-
-       GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
-
-       ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
-       GEM_BUG_ON(IS_ERR(ce));
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
 
        rq = igt_spinner_create_request(spin, ce, MI_NOOP);
-
        intel_context_put(ce);
 
        if (IS_ERR(rq)) {
        if (err && spin)
                igt_spinner_end(spin);
 
-       kernel_context_close(ctx);
        return err;
 }
 
        return err;
 }
 
-static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+static struct i915_vma *create_batch(struct i915_address_space *vm)
 {
        struct drm_i915_gem_object *obj;
-       struct i915_address_space *vm;
        struct i915_vma *vma;
        int err;
 
-       obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+       obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vm = i915_gem_context_get_vm_rcu(ctx);
        vma = i915_vma_instance(obj, vm, NULL);
-       i915_vm_put(vm);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        return count;
 }
 
-static int check_dirty_whitelist(struct i915_gem_context *ctx,
-                                struct intel_engine_cs *engine)
+static int check_dirty_whitelist(struct intel_context *ce)
 {
        const u32 values[] = {
                0x00000000,
                0xffff00ff,
                0xffffffff,
        };
-       struct i915_address_space *vm;
+       struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;
 
-       vm = i915_gem_context_get_vm_rcu(ctx);
-       scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
-       i915_vm_put(vm);
+       scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);
 
-       batch = create_batch(ctx);
+       batch = create_batch(ce->vm);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
 
                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
-               if (INTEL_GEN(ctx->i915) >= 8)
+               if (INTEL_GEN(engine->i915) >= 8)
                        lrm++, srm++;
 
                pr_debug("%s: Writing garbage to %x\n",
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);
 
-               rq = igt_request_alloc(ctx, engine);
+               rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                        break;
        }
 
-       if (igt_flush_test(ctx->i915))
+       if (igt_flush_test(engine->i915))
                err = -EIO;
 out_batch:
        i915_vma_unpin_and_release(&batch, 0);
 {
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
        enum intel_engine_id id;
-       struct file *file;
-       int err = 0;
 
        /* Can the user write to the whitelisted registers? */
 
        if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;
 
-       file = mock_file(gt->i915);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       ctx = live_context(gt->i915, file);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto out_file;
-       }
-
        for_each_engine(engine, gt, id) {
+               struct intel_context *ce;
+               int err;
+
                if (engine->whitelist.count == 0)
                        continue;
 
-               err = check_dirty_whitelist(ctx, engine);
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce))
+                       return PTR_ERR(ce);
+
+               err = check_dirty_whitelist(ce);
+               intel_context_put(ce);
                if (err)
-                       goto out_file;
+                       return err;
        }
 
-out_file:
-       fput(file);
-       return err;
+       return 0;
 }
 
 static int live_reset_whitelist(void *arg)
 static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                                       struct intel_engine_cs *engine)
 {
+       struct i915_address_space *vm;
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;
 
-       batch = create_batch(ctx);
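+       /* Borrow the context's vm just long enough to construct the batch */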
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       batch = create_batch(vm);
+       i915_vm_put(vm);
        if (IS_ERR(batch))
                return PTR_ERR(batch);
 
 
 
 #include <linux/kthread.h>
 
-#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
+#include "i915_gem_gtt.h"
 #include "gvt.h"
 
 #define RING_CTX_OFF(x) \
        struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
        struct i915_ppgtt *ppgtt;
        enum intel_engine_id i;
        int ret;
 
-       ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
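+       /*
+        * There is no longer a global kernel context to borrow a vm from,
+        * so allocate a ppgtt for the shadow contexts directly.
+        */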
+       ppgtt = i915_ppgtt_create(i915);
+       if (IS_ERR(ppgtt))
+               return PTR_ERR(ppgtt);
 
-       ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
        i915_context_ppgtt_root_save(s, ppgtt);
 
        for_each_engine(engine, i915, i) {
                INIT_LIST_HEAD(&s->workload_q_head[i]);
                s->shadow[i] = ERR_PTR(-EINVAL);
 
-               ce = intel_context_create(ctx, engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        goto out_shadow_ctx;
                }
 
+               /* Swap the default vm for GVT's own shadow ppgtt */
+               i915_vm_put(ce->vm);
+               ce->vm = i915_vm_get(&ppgtt->vm);
                intel_context_set_single_submission(ce);
 
                if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
        i915_vm_put(&ppgtt->vm);
-       i915_gem_context_put(ctx);
        return 0;
 
 out_shadow_ctx:
                intel_context_put(s->shadow[i]);
        }
        i915_vm_put(&ppgtt->vm);
-       i915_gem_context_put(ctx);
        return ret;
 }
 
 
 
 #include <linux/debugobjects.h>
 
+#include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_ring.h"
 
        struct llist_node *node, *next;
        unsigned long flags;
 
+       /* Barriers are only carried by the engine's kernel (barrier) context */
+       GEM_BUG_ON(!intel_context_is_barrier(rq->context));
        GEM_BUG_ON(intel_engine_is_virtual(engine));
        GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
 
 
 
        struct pci_dev *bridge_dev;
 
-       /* Context used internally to idle the GPU and setup initial state */
-       struct i915_gem_context *kernel_context;
-
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
        struct rb_root uabi_engines;
 
 
                GEM_BUG_ON(!engine->kernel_context);
                engine->serial++; /* force the kernel context switch */
 
-               ce = intel_context_create(engine->kernel_context->gem_context,
-                                         engine);
+               ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        goto out;
                        continue;
 
                /* Serialise with retirement on another CPU */
+               GEM_BUG_ON(!i915_request_completed(rq));
                err = __intel_context_flush_retire(rq->context);
                if (err)
                        goto out;
        }
 
        intel_gt_init(&dev_priv->gt);
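+       /* Pure bookkeeping: no default kernel context is created here */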
+       i915_gem_init__contexts(dev_priv);
 
        ret = intel_engines_setup(&dev_priv->gt);
        if (ret) {
                goto err_unlock;
        }
 
-       ret = i915_gem_init_contexts(dev_priv);
-       if (ret) {
-               GEM_BUG_ON(ret == -EIO);
-               goto err_scratch;
-       }
-
        ret = intel_engines_init(&dev_priv->gt);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
-               goto err_context;
+               goto err_scratch;
        }
 
        intel_uc_init(&dev_priv->gt.uc);
                intel_uc_fini(&dev_priv->gt.uc);
                intel_engines_cleanup(&dev_priv->gt);
        }
-err_context:
-       if (ret != -EIO)
-               i915_gem_driver_release__contexts(dev_priv);
 err_scratch:
        intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
        intel_engines_cleanup(&dev_priv->gt);
-       i915_gem_driver_release__contexts(dev_priv);
        intel_gt_driver_release(&dev_priv->gt);
 
        intel_wa_list_free(&dev_priv->gt_wa_list);
        intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
        i915_gem_cleanup_userptr(dev_priv);
 
+       i915_gem_driver_release__contexts(dev_priv);
+
        i915_gem_drain_freed_objects(dev_priv);
 
        WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
 
         * If everybody agrees not to write into the scratch page,
         * we can reuse it for all vm, keeping contexts and processes separate.
         */
-       if (vm->has_read_only &&
-           vm->i915->kernel_context &&
-           vm->i915->kernel_context->vm) {
-               struct i915_address_space *clone =
-                       rcu_dereference_protected(vm->i915->kernel_context->vm,
-                                                 true); /* static */
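+       /* Clone the read-only scratch from the gt's default vm */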
+       if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+               struct i915_address_space *clone = vm->gt->vm;
 
                GEM_BUG_ON(!clone->has_read_only);
 
 
         */
        spin_lock(&i915->gem.contexts.lock);
        list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
-               if (ctx == i915->kernel_context)
-                       continue;
-
                if (!kref_get_unless_zero(&ctx->ref))
                        continue;
 
 
 
 void i915_request_add(struct i915_request *rq)
 {
-       struct i915_sched_attr attr = rq->context->gem_context->sched;
        struct intel_timeline * const tl = i915_request_timeline(rq);
+       struct i915_sched_attr attr = {};
        struct i915_request *prev;
 
        lockdep_assert_held(&tl->mutex);
 
        prev = __i915_request_commit(rq);
 
+       /* Kernel contexts have no GEM context, so fall back to default attrs */
+       if (rq->context->gem_context)
+               attr = rq->context->gem_context->sched;
+
        /*
         * Boost actual workloads past semaphores!
         *
 
        struct i915_address_space *vm = vma->vm;
        int err;
 
+       /* The vma was never bound into this vm, so there is nothing to unbind */
+       if (!drm_mm_node_allocated(&vma->node))
+               return 0;
+
        err = mutex_lock_interruptible(&vm->mutex);
        if (err)
                return err;
 
 
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx = i915->kernel_context;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
-       struct i915_address_space *vm;
        struct i915_vma *vma;
        u32 *cmd;
        int err;
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vm = i915_gem_context_get_vm_rcu(ctx);
-       vma = i915_vma_instance(obj, vm, NULL);
-       i915_vm_put(vm);
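+       /* Use the gt's default vm directly instead of a context lookup */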
+       vma = i915_vma_instance(obj, i915->gt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
 
 
        for_each_engine(engine, &i915->gt, id)
                mock_engine_free(engine);
-       i915_gem_driver_release__contexts(i915);
 
        drain_workqueue(i915->wq);
        i915_gem_drain_freed_objects(i915);
        mock_init_contexts(i915);
 
        mock_init_ggtt(i915, &i915->ggtt);
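+       /* The mock device has no ppgtt; the ggtt doubles as the gt's vm */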
+       i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
        mkwrite_device_info(i915)->engine_mask = BIT(0);
 
        if (!i915->engine[RCS0])
                goto err_unlock;
 
-       i915->kernel_context = mock_context(i915, NULL);
-       if (!i915->kernel_context)
-               goto err_engine;
-
        if (mock_engine_init(i915->engine[RCS0]))
                goto err_context;
 
        return i915;
 
 err_context:
-       i915_gem_driver_release__contexts(i915);
-err_engine:
        mock_engine_free(i915->engine[RCS0]);
 err_unlock:
        destroy_workqueue(i915->wq);