static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       /* ctx->link is now guarded by gem.contexts.lock instead of struct_mutex */
+       spin_lock(&ctx->i915->gem.contexts.lock);
+       list_del(&ctx->link);
+       spin_unlock(&ctx->i915->gem.contexts.lock);
+
        free_engines(rcu_access_pointer(ctx->engines));
        mutex_destroy(&ctx->engines_mutex);
 
        kfree(ctx->name);
        put_pid(ctx->pid);
 
-       list_del(&ctx->link);
        mutex_destroy(&ctx->mutex);
 
        kfree_rcu(ctx, rcu);
 }
 
-static void contexts_free(struct drm_i915_private *i915)
+/* Free every context on a free-llist already detached by the caller */
+static void contexts_free_all(struct llist_node *list)
 {
-       struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
        struct i915_gem_context *ctx, *cn;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       llist_for_each_entry_safe(ctx, cn, freed, free_link)
+       llist_for_each_entry_safe(ctx, cn, list, free_link)
                i915_gem_context_free(ctx);
 }
 
-static void contexts_free_first(struct drm_i915_private *i915)
+/* Atomically take the whole free_list and reap it; no struct_mutex needed */
+static void contexts_flush_free(struct i915_gem_contexts *gc)
 {
-       struct i915_gem_context *ctx;
-       struct llist_node *freed;
-
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       freed = llist_del_first(&i915->contexts.free_list);
-       if (!freed)
-               return;
-
-       ctx = container_of(freed, typeof(*ctx), free_link);
-       i915_gem_context_free(ctx);
+       contexts_free_all(llist_del_all(&gc->free_list));
 }
 
 static void contexts_free_worker(struct work_struct *work)
 {
-       struct drm_i915_private *i915 =
-               container_of(work, typeof(*i915), contexts.free_work);
+       struct i915_gem_contexts *gc =
+               container_of(work, typeof(*gc), free_work);
 
-       mutex_lock(&i915->drm.struct_mutex);
-       contexts_free(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
+       /* Freeing is serialised by the detached llist; struct_mutex not required */
+       contexts_flush_free(gc);
 }
 
 void i915_gem_context_release(struct kref *ref)
 {
        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
-       struct drm_i915_private *i915 = ctx->i915;
+       struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
 
        trace_i915_context_free(ctx);
-       if (llist_add(&ctx->free_link, &i915->contexts.free_list))
-               queue_work(i915->wq, &i915->contexts.free_work);
+       /* llist_add() returns true only for the first entry: kick the worker once */
+       if (llist_add(&ctx->free_link, &gc->free_list))
+               schedule_work(&gc->free_work);
 }
 
 static void context_close(struct i915_gem_context *ctx)
 {
-       i915_gem_context_set_closed(ctx);
+       struct i915_address_space *vm;
 
-       if (ctx->vm)
-               i915_vm_close(ctx->vm);
+       i915_gem_context_set_closed(ctx);
 
        mutex_lock(&ctx->mutex);
 
+       vm = i915_gem_context_vm(ctx);
+       if (vm)
+               i915_vm_close(vm);
+
        ctx->file_priv = ERR_PTR(-EBADF);
 
        /*
                return ERR_PTR(-ENOMEM);
 
        kref_init(&ctx->ref);
-       list_add_tail(&ctx->link, &i915->contexts.list);
        ctx->i915 = i915;
        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
        mutex_init(&ctx->mutex);
        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
+       spin_lock(&i915->gem.contexts.lock);
+       list_add_tail(&ctx->link, &i915->gem.contexts.list);
+       spin_unlock(&i915->gem.contexts.lock);
+
        return ctx;
 
 err_free:
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-       struct i915_address_space *old = ctx->vm;
+       struct i915_address_space *old = i915_gem_context_vm(ctx);
 
        GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
-       ctx->vm = i915_vm_open(vm);
+       rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
        context_apply_all(ctx, __apply_ppgtt, vm);
 
        return old;
 static void __assign_ppgtt(struct i915_gem_context *ctx,
                           struct i915_address_space *vm)
 {
-       if (vm == ctx->vm)
+       if (vm == rcu_access_pointer(ctx->vm))
                return;
 
        vm = __set_ppgtt(ctx, vm);
 }
 
 static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 {
        struct i915_gem_context *ctx;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
-           !HAS_EXECLISTS(dev_priv))
+           !HAS_EXECLISTS(i915))
                return ERR_PTR(-EINVAL);
 
-       /* Reap the most stale context */
-       contexts_free_first(dev_priv);
+       /* Reap the stale contexts */
+       contexts_flush_free(&i915->gem.contexts);
 
-       ctx = __create_context(dev_priv);
+       ctx = __create_context(i915);
        if (IS_ERR(ctx))
                return ctx;
 
-       if (HAS_FULL_PPGTT(dev_priv)) {
+       if (HAS_FULL_PPGTT(i915)) {
                struct i915_ppgtt *ppgtt;
 
-               ppgtt = i915_ppgtt_create(dev_priv);
+               ppgtt = i915_ppgtt_create(i915);
                if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        return ERR_CAST(ppgtt);
                }
 
+               mutex_lock(&ctx->mutex);
                __assign_ppgtt(ctx, &ppgtt->vm);
+               mutex_unlock(&ctx->mutex);
+
                i915_vm_put(&ppgtt->vm);
        }
 
        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
                struct intel_timeline *timeline;
 
-               timeline = intel_timeline_create(&dev_priv->gt, NULL);
+               timeline = intel_timeline_create(&i915->gt, NULL);
                if (IS_ERR(timeline)) {
                        context_close(ctx);
                        return ERR_CAST(timeline);
        return ctx;
 }
 
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
 {
-       mutex_init(&i915->contexts.mutex);
-       INIT_LIST_HEAD(&i915->contexts.list);
+       /* gc->lock (a spinlock) now protects gc->list in place of the old mutex */
+       spin_lock_init(&gc->lock);
+       INIT_LIST_HEAD(&gc->list);
 
-       INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
-       init_llist_head(&i915->contexts.free_list);
+       INIT_WORK(&gc->free_work, contexts_free_worker);
+       init_llist_head(&gc->free_list);
 }
 
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
 {
        struct i915_gem_context *ctx;
 
        /* Reassure ourselves we are only called once */
-       GEM_BUG_ON(dev_priv->kernel_context);
+       GEM_BUG_ON(i915->kernel_context);
 
-       init_contexts(dev_priv);
+       /* Context bookkeeping now lives in the i915->gem.contexts substruct */
+       init_contexts(&i915->gem.contexts);
 
        /* lowest priority; idle task */
-       ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+       ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context\n");
                return PTR_ERR(ctx);
        }
-       dev_priv->kernel_context = ctx;
+       i915->kernel_context = ctx;
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
-                        DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+                        DRIVER_CAPS(i915)->has_logical_contexts ?
                         "logical" : "fake");
        return 0;
 }
 
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+/* Driver-release teardown; no longer run under struct_mutex */
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 {
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
        destroy_kernel_context(&i915->kernel_context);
 }
 
 static int gem_context_register(struct i915_gem_context *ctx,
                                struct drm_i915_file_private *fpriv)
 {
+       struct i915_address_space *vm;
        int ret;
 
        ctx->file_priv = fpriv;
-       if (ctx->vm)
-               ctx->vm->file = fpriv;
+
+       mutex_lock(&ctx->mutex);
+       vm = i915_gem_context_vm(ctx);
+       if (vm)
+               WRITE_ONCE(vm->file, fpriv); /* XXX */
+       mutex_unlock(&ctx->mutex);
 
        ctx->pid = get_task_pid(current, PIDTYPE_PID);
        ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
        idr_init(&file_priv->context_idr);
        idr_init_base(&file_priv->vm_idr, 1);
 
-       mutex_lock(&i915->drm.struct_mutex);
        ctx = i915_gem_create_context(i915, 0);
-       mutex_unlock(&i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err;
 void i915_gem_context_close(struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct drm_i915_private *i915 = file_priv->dev_priv;
 
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
        idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
        idr_destroy(&file_priv->vm_idr);
        mutex_destroy(&file_priv->vm_idr_lock);
+
+       /* Reap any contexts queued for freeing by the idr cleanup above */
+       contexts_flush_free(&i915->gem.contexts);
 }
 
 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
        struct i915_address_space *vm;
        int ret;
 
-       if (!ctx->vm)
+       if (!rcu_access_pointer(ctx->vm))
                return -ENODEV;
 
-       /* XXX rcu acquire? */
-       ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
-       if (ret)
-               return ret;
-
+       rcu_read_lock();
        vm = i915_vm_get(ctx->vm);
-       mutex_unlock(&ctx->i915->drm.struct_mutex);
+       rcu_read_unlock();
 
        ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (ret)
        if (args->size)
                return -EINVAL;
 
-       if (!ctx->vm)
+       if (!rcu_access_pointer(ctx->vm))
                return -ENODEV;
 
        if (upper_32_bits(args->value))
        if (!vm)
                return -ENOENT;
 
-       err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+       err = mutex_lock_interruptible(&ctx->mutex);
        if (err)
                goto out;
 
-       if (vm == ctx->vm)
+       if (i915_gem_context_is_closed(ctx)) {
+               err = -ENOENT;
+               goto out;
+       }
+
+       if (vm == rcu_access_pointer(ctx->vm))
                goto unlock;
 
        /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
-       mutex_lock(&ctx->mutex);
        lut_close(ctx);
-       mutex_unlock(&ctx->mutex);
 
        old = __set_ppgtt(ctx, vm);
 
        }
 
 unlock:
-       mutex_unlock(&ctx->i915->drm.struct_mutex);
-
+       mutex_unlock(&ctx->mutex);
 out:
        i915_vm_put(vm);
        return err;
                    struct i915_gem_context *src)
 {
        struct i915_address_space *vm;
+       int err = 0;
 
        rcu_read_lock();
        do {
-               vm = READ_ONCE(src->vm);
+               vm = rcu_dereference(src->vm);
                if (!vm)
                        break;
 
                 * it cannot be reallocated elsewhere.
                 */
 
-               if (vm == READ_ONCE(src->vm))
+               if (vm == rcu_access_pointer(src->vm))
                        break;
 
                i915_vm_put(vm);
        rcu_read_unlock();
 
        if (vm) {
-               __assign_ppgtt(dst, vm);
+               if (!mutex_lock_interruptible(&dst->mutex)) {
+                       __assign_ppgtt(dst, vm);
+                       mutex_unlock(&dst->mutex);
+               } else {
+                       err = -EINTR;
+               }
                i915_vm_put(vm);
        }
 
-       return 0;
+       return err;
 }
 
 static int create_clone(struct i915_user_extension __user *ext, void *data)
                return -EIO;
        }
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
        ext_data.ctx = i915_gem_create_context(i915, args->flags);
-       mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ext_data.ctx))
                return PTR_ERR(ext_data.ctx);
 
 
        case I915_CONTEXT_PARAM_GTT_SIZE:
                args->size = 0;
-               if (ctx->vm)
-                       args->value = ctx->vm->total;
+               rcu_read_lock();
+               if (rcu_access_pointer(ctx->vm))
+                       args->value = rcu_dereference(ctx->vm)->total;
                else
                        args->value = to_i915(dev)->ggtt.vm.total;
+               rcu_read_unlock();
                break;
 
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_reset_stats *args = data;
        struct i915_gem_context *ctx;
        int ret;
         */
 
        if (capable(CAP_SYS_ADMIN))
-               args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+               args->reset_count = i915_reset_count(&i915->gpu_error);
        else
                args->reset_count = 0;
 
 
 
 #include "gt/intel_context.h"
 
+#include "i915_drv.h"
 #include "i915_gem.h"
+#include "i915_gem_gtt.h"
 #include "i915_scheduler.h"
 #include "intel_device_info.h"
 
 }
 
 /* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
                          struct drm_file *file);
        kref_put(&ctx->ref, i915_gem_context_release);
 }
 
+/* Dereference ctx->vm; caller must hold ctx->mutex (checked via lockdep) */
+static inline struct i915_address_space *
+i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+       return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
+}
+
+/*
+ * Acquire a reference to the context's address space under RCU,
+ * falling back to the global GTT when the context has no ppGTT.
+ * Caller must release with i915_vm_put().
+ */
+static inline struct i915_address_space *
+i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+       struct i915_address_space *vm;
+
+       rcu_read_lock();
+       vm = rcu_dereference(ctx->vm);
+       if (!vm)
+               vm = &ctx->i915->ggtt.vm;
+       vm = i915_vm_get(vm);
+       rcu_read_unlock();
+
+       return vm;
+}
+
 static inline struct i915_gem_engines *
 i915_gem_context_engines(struct i915_gem_context *ctx)
 {
 
         * In other modes, this is a NULL pointer with the expectation that
         * the caller uses the shared global GTT.
         */
-       struct i915_address_space *vm;
+       struct i915_address_space __rcu *vm;
 
        /**
         * @pid: process id of creator
 
                return -ENOENT;
 
        eb->gem_context = ctx;
-       if (ctx->vm)
+       if (rcu_access_pointer(ctx->vm))
                eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
        eb->context_flags = 0;
 
                 * On almost all of the older hw, we cannot tell the GPU that
                 * a page is readonly.
                 */
-               vm = dev_priv->kernel_context->vm;
+               vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
+                                              true); /* static vm */
                if (!vm || !vm->has_read_only)
                        return -ENODEV;
        }
 
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *dev_priv = ctx->i915;
        unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
-       struct i915_address_space *vm = ctx->vm;
        struct drm_i915_gem_object *obj;
        struct i915_gem_engines_iter it;
+       struct i915_address_space *vm;
        struct intel_context *ce;
        struct i915_vma *vma;
        unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
        unsigned int n;
        int first, last;
-       int err;
+       int err = 0;
 
        /*
         * Make sure there's no funny business when doing a PIN_UPDATE -- in the
         * huge-gtt-pages.
         */
 
-       if (!vm || !i915_vm_is_4lvl(vm)) {
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       if (!i915_vm_is_4lvl(vm)) {
                pr_info("48b PPGTT not supported, skipping\n");
-               return 0;
+               goto out_vm;
        }
 
        first = ilog2(I915_GTT_PAGE_SIZE_64K);
        i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
+out_vm:
+       i915_vm_put(vm);
 
        return err;
 }
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *i915 = ctx->i915;
        struct vfsmount *gemfs = i915->mm.gemfs;
-       struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+       struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *vaddr;
 out_restore:
        i915->mm.gemfs = gemfs;
 
+       i915_vm_put(vm);
        return err;
 }
 
 {
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *i915 = ctx->i915;
-       struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+       struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
        struct drm_i915_gem_object *obj;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        struct i915_vma *vma;
        unsigned int flags = PIN_USER;
        unsigned int n;
-       int err;
+       int err = 0;
 
        /*
         * Sanity check shrinking huge-paged object -- make sure nothing blows
 
        if (!igt_can_allocate_thp(i915)) {
                pr_info("missing THP support, skipping\n");
-               return 0;
+               goto out_vm;
        }
 
        obj = i915_gem_object_create_shmem(i915, SZ_2M);
-       if (IS_ERR(obj))
-               return PTR_ERR(obj);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto out_vm;
+       }
 
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
        i915_vma_close(vma);
 out_put:
        i915_gem_object_put(obj);
+out_vm:
+       i915_vm_put(vm);
 
        return err;
 }
        };
        struct drm_file *file;
        struct i915_gem_context *ctx;
+       struct i915_address_space *vm;
        intel_wakeref_t wakeref;
        int err;
 
                goto out_unlock;
        }
 
-       if (ctx->vm)
-               ctx->vm->scrub_64K = true;
+       mutex_lock(&ctx->mutex);
+       vm = i915_gem_context_vm(ctx);
+       if (vm)
+               WRITE_ONCE(vm->scrub_64K, true);
+       mutex_unlock(&ctx->mutex);
 
        err = i915_subtests(tests, ctx);
 
 
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
-               goto out_unlock;
+               goto out_file;
        }
 
        for (n = 0; n < nctx; n++) {
                ctx[n] = live_context(i915, file);
                if (IS_ERR(ctx[n])) {
                        err = PTR_ERR(ctx[n]);
-                       goto out_unlock;
+                       goto out_file;
                }
        }
 
                        rq = igt_request_alloc(ctx[n], engine);
                        if (IS_ERR(rq)) {
                                err = PTR_ERR(rq);
-                               goto out_unlock;
+                               goto out_file;
                        }
                        i915_request_add(rq);
                }
                        pr_err("Failed to populated %d contexts\n", nctx);
                        intel_gt_set_wedged(&i915->gt);
                        err = -EIO;
-                       goto out_unlock;
+                       goto out_file;
                }
 
                times[1] = ktime_get_raw();
 
                err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
-                       goto out_unlock;
+                       goto out_file;
 
                end_time = jiffies + i915_selftest.timeout_jiffies;
                for_each_prime_number_from(prime, 2, 8192) {
                                rq = igt_request_alloc(ctx[n % nctx], engine);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
-                                       goto out_unlock;
+                                       goto out_file;
                                }
 
                                /*
 
                err = igt_live_test_end(&t);
                if (err)
-                       goto out_unlock;
+                       goto out_file;
 
                pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
        }
 
-out_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
+out_file:
        mock_file_free(i915, file);
        return err;
 }
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
-               goto out_locked;
+               goto out_file;
        }
 
        engines = i915_gem_context_lock_engines(ctx);
        if (!data) {
                i915_gem_context_unlock_engines(ctx);
                err = -ENOMEM;
-               goto out_locked;
+               goto out;
        }
 
        m = 0; /* Use the first context as our template for the engines */
                err = intel_context_pin(ce);
                if (err) {
                        i915_gem_context_unlock_engines(ctx);
-                       goto out_locked;
+                       goto out;
                }
                data[m++].ce[0] = intel_context_get(ce);
        }
                ctx = live_context(i915, file);
                if (IS_ERR(ctx)) {
                        err = PTR_ERR(ctx);
-                       goto out_locked;
+                       goto out;
                }
 
                for (m = 0; m < count; m++) {
 
                        ce = intel_context_create(ctx, data[m].ce[0]->engine);
                        if (IS_ERR(ce))
-                               goto out_locked;
+                               goto out;
 
                        err = intel_context_pin(ce);
                        if (err) {
                                intel_context_put(ce);
-                               goto out_locked;
+                               goto out;
                        }
 
                        data[m].ce[n] = ce;
                }
        }
 
-       mutex_unlock(&i915->drm.struct_mutex);
-
        for (fn = func; !err && *fn; fn++) {
                struct igt_live_test t;
                int n;
                mutex_unlock(&i915->drm.struct_mutex);
        }
 
-       mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
        for (n = 0; n < count; n++) {
                for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
                        if (!data[n].ce[m])
                        intel_context_put(data[n].ce[m]);
                }
        }
-       mutex_unlock(&i915->drm.struct_mutex);
        kfree(data);
+out_file:
        mock_file_free(i915, file);
        return err;
 }
                if (IS_ERR(file))
                        return PTR_ERR(file);
 
-               mutex_lock(&i915->drm.struct_mutex);
-
                err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
-                       goto out_unlock;
+                       goto out_file;
 
                ncontexts = 0;
                ndwords = 0;
                        ctx = kernel_context(i915);
                        if (IS_ERR(ctx)) {
                                err = PTR_ERR(ctx);
-                               goto out_unlock;
+                               goto out_file;
                        }
 
                        ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
                                        err = PTR_ERR(obj);
                                        intel_context_put(ce);
                                        kernel_context_close(ctx);
-                                       goto out_unlock;
+                                       goto out_file;
                                }
                        }
 
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
                                       engine->name,
-                                      yesno(!!ctx->vm), err);
+                                      yesno(!!rcu_access_pointer(ctx->vm)),
+                                      err);
                                intel_context_put(ce);
                                kernel_context_close(ctx);
-                               goto out_unlock;
+                               goto out_file;
                        }
 
                        err = throttle(ce, tq, ARRAY_SIZE(tq));
                        if (err) {
                                intel_context_put(ce);
                                kernel_context_close(ctx);
-                               goto out_unlock;
+                               goto out_file;
                        }
 
                        if (++dw == max_dwords(obj)) {
                        dw += rem;
                }
 
-out_unlock:
+out_file:
                throttle_release(tq, ARRAY_SIZE(tq));
                if (igt_live_test_end(&t))
                        err = -EIO;
-               mutex_unlock(&i915->drm.struct_mutex);
 
                mock_file_free(i915, file);
                if (err)
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        parent = live_context(i915, file);
        if (IS_ERR(parent)) {
                err = PTR_ERR(parent);
-               goto out_unlock;
+               goto out_file;
        }
 
        if (!parent->vm) { /* not full-ppgtt; nothing to share */
                err = 0;
-               goto out_unlock;
+               goto out_file;
        }
 
        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
-               goto out_unlock;
+               goto out_file;
 
        for_each_engine(engine, i915, id) {
                unsigned long ncontexts, ndwords, dw;
                                goto out_test;
                        }
 
+                       mutex_lock(&ctx->mutex);
                        __assign_ppgtt(ctx, parent->vm);
+                       mutex_unlock(&ctx->mutex);
 
                        ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
                        GEM_BUG_ON(IS_ERR(ce));
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
                                       engine->name,
-                                      yesno(!!ctx->vm), err);
+                                      yesno(!!rcu_access_pointer(ctx->vm)),
+                                      err);
                                intel_context_put(ce);
                                kernel_context_close(ctx);
                                goto out_test;
                        dw += rem;
                }
 
-               mutex_unlock(&i915->drm.struct_mutex);
                i915_gem_drain_freed_objects(i915);
-               mutex_lock(&i915->drm.struct_mutex);
        }
 out_test:
        throttle_release(tq, ARRAY_SIZE(tq));
        if (igt_live_test_end(&t))
                err = -EIO;
-out_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
        mock_file_free(i915, file);
        return err;
 }
        if (flags & TEST_RESET)
                igt_global_reset_lock(&i915->gt);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
        i915_gem_object_put(obj);
 
 out_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
-
        if (flags & TEST_RESET)
                igt_global_reset_unlock(&i915->gt);
 
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
-               goto out_unlock;
+               goto out_file;
 
        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
-               goto out_unlock;
+               goto out_file;
        }
 
-       vm = ctx->vm ?: &i915->ggtt.alias->vm;
+       rcu_read_lock();
+       vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
        if (!vm || !vm->has_read_only) {
+               rcu_read_unlock();
                err = 0;
-               goto out_unlock;
+               goto out_file;
        }
+       rcu_read_unlock();
 
        ndwords = 0;
        dw = 0;
                                if (IS_ERR(obj)) {
                                        err = PTR_ERR(obj);
                                        i915_gem_context_unlock_engines(ctx);
-                                       goto out_unlock;
+                                       goto out_file;
                                }
 
                                if (prandom_u32_state(&prng) & 1)
                        if (err) {
                                pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
                                       ndwords, dw, max_dwords(obj),
-                                      ce->engine->name, yesno(!!ctx->vm), err);
+                                      ce->engine->name,
+                                      yesno(!!rcu_access_pointer(ctx->vm)),
+                                      err);
                                i915_gem_context_unlock_engines(ctx);
-                               goto out_unlock;
+                               goto out_file;
                        }
 
                        err = throttle(ce, tq, ARRAY_SIZE(tq));
                        if (err) {
                                i915_gem_context_unlock_engines(ctx);
-                               goto out_unlock;
+                               goto out_file;
                        }
 
                        if (++dw == max_dwords(obj)) {
                dw += rem;
        }
 
-out_unlock:
+out_file:
        throttle_release(tq, ARRAY_SIZE(tq));
        if (igt_live_test_end(&t))
                err = -EIO;
-       mutex_unlock(&i915->drm.struct_mutex);
 
        mock_file_free(i915, file);
        return err;
 }
 
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+static int check_scratch(struct i915_address_space *vm, u64 offset)
 {
        struct drm_mm_node *node =
-               __drm_mm_interval_first(&ctx->vm->mm,
+               __drm_mm_interval_first(&vm->mm,
                                        offset, offset + sizeof(u32) - 1);
        if (!node || node->start > offset)
                return 0;
 {
        struct drm_i915_private *i915 = ctx->i915;
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 *cmd;
 
        intel_gt_chipset_flush(engine->gt);
 
-       vma = i915_vma_instance(obj, ctx->vm, NULL);
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
-               goto err;
+               goto err_vm;
        }
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
        if (err)
-               goto err;
+               goto err_vm;
 
-       err = check_scratch(ctx, offset);
+       err = check_scratch(vm, offset);
        if (err)
                goto err_unpin;
 
 
        i915_request_add(rq);
 
+       i915_vm_put(vm);
        return 0;
 
 skip_request:
        i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(vma);
+err_vm:
+       i915_vm_put(vm);
 err:
        i915_gem_object_put(obj);
        return err;
 {
        struct drm_i915_private *i915 = ctx->i915;
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
        const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
        const u32 result = 0x100;
        struct i915_request *rq;
 
        intel_gt_chipset_flush(engine->gt);
 
-       vma = i915_vma_instance(obj, ctx->vm, NULL);
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
-               goto err;
+               goto err_vm;
        }
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
        if (err)
-               goto err;
+               goto err_vm;
 
-       err = check_scratch(ctx, offset);
+       err = check_scratch(vm, offset);
        if (err)
                goto err_unpin;
 
        err = i915_gem_object_set_to_cpu_domain(obj, false);
        i915_gem_object_unlock(obj);
        if (err)
-               goto err;
+               goto err_vm;
 
        cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
-               goto err;
+               goto err_vm;
        }
 
        *value = cmd[result / sizeof(*cmd)];
        i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(vma);
+err_vm:
+       i915_vm_put(vm);
 err:
        i915_gem_object_put(obj);
        return err;
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
-               goto out_unlock;
+               goto out_file;
 
        ctx_a = live_context(i915, file);
        if (IS_ERR(ctx_a)) {
                err = PTR_ERR(ctx_a);
-               goto out_unlock;
+               goto out_file;
        }
 
        ctx_b = live_context(i915, file);
        if (IS_ERR(ctx_b)) {
                err = PTR_ERR(ctx_b);
-               goto out_unlock;
+               goto out_file;
        }
 
        /* We can only test vm isolation, if the vm are distinct */
        if (ctx_a->vm == ctx_b->vm)
-               goto out_unlock;
+               goto out_file;
 
        vm_total = ctx_a->vm->total;
        GEM_BUG_ON(ctx_b->vm->total != vm_total);
                                err = read_from_scratch(ctx_b, engine,
                                                        offset, &value);
                        if (err)
-                               goto out_unlock;
+                               goto out_file;
 
                        if (value) {
                                pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
                                       lower_32_bits(offset),
                                       this);
                                err = -EINVAL;
-                               goto out_unlock;
+                               goto out_file;
                        }
 
                        this++;
        pr_info("Checked %lu scratch offsets across %d engines\n",
                count, RUNTIME_INFO(i915)->num_engines);
 
-out_unlock:
+out_file:
        if (igt_live_test_end(&t))
                err = -EIO;
-       mutex_unlock(&i915->drm.struct_mutex);
-
        mock_file_free(i915, file);
        return err;
 }
         * a request; useful for retiring old state after loading new.
         */
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        ctx = mock_context(i915, "mock");
-       if (!ctx) {
-               err = -ENOMEM;
-               goto unlock;
-       }
+       if (!ctx)
+               return -ENOMEM;
 
        counter = 0;
        err = context_barrier_task(ctx, 0,
 
 out:
        mock_context_close(ctx);
-unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 #undef pr_fmt
 #define pr_fmt(x) x
 
                if (!ppgtt)
                        goto err_put;
 
+               mutex_lock(&ctx->mutex);
                __set_ppgtt(ctx, &ppgtt->vm);
+               mutex_unlock(&ctx->mutex);
+
                i915_vm_put(&ppgtt->vm);
        }
 
 
 void mock_init_contexts(struct drm_i915_private *i915)
 {
-       init_contexts(i915);
+       init_contexts(&i915->gem.contexts);
 }
 
 struct i915_gem_context *
        struct i915_gem_context *ctx;
        int err;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
        ctx = i915_gem_create_context(i915, 0);
        if (IS_ERR(ctx))
                return ctx;
 
                   struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
 {
+       struct i915_address_space *vm;
+
        GEM_BUG_ON(!engine->cops);
 
        kref_init(&ce->ref);
 
        ce->gem_context = ctx;
-       ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+       rcu_read_lock();
+       vm = rcu_dereference(ctx->vm);
+       if (vm)
+               ce->vm = i915_vm_get(vm);
+       else
+               ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
+       rcu_read_unlock();
        if (ctx->timeline)
                ce->timeline = intel_timeline_get(ctx->timeline);
 
 
         * HW tries to write past the end of one.
         */
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-
        fixme = kernel_context(gt->i915);
-       if (IS_ERR(fixme)) {
-               err = PTR_ERR(fixme);
-               goto unlock;
-       }
+       if (IS_ERR(fixme))
+               return PTR_ERR(fixme);
 
        for_each_engine(engine, gt->i915, id) {
                struct {
        }
 
        kernel_context_close(fixme);
-unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        return err;
 }
 
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-
        fixme = live_context(gt->i915, file);
        if (IS_ERR(fixme)) {
                err = PTR_ERR(fixme);
-               goto unlock;
+               goto out_file;
        }
 
        for_each_engine(engine, gt->i915, id) {
                        break;
        }
 
-unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
        mock_file_free(gt->i915, file);
        return err;
 }
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
-
        fixme = live_context(gt->i915, file);
        if (IS_ERR(fixme)) {
                err = PTR_ERR(fixme);
-               goto unlock;
+               goto out_file;
        }
 
        for_each_engine(engine, gt->i915, id) {
                        break;
        }
 
-unlock:
-       mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
        mock_file_free(gt->i915, file);
        return err;
 }
 
        memset(h, 0, sizeof(*h));
        h->gt = gt;
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        h->ctx = kernel_context(gt->i915);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        if (IS_ERR(h->ctx))
                return PTR_ERR(h->ctx);
 
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
        struct intel_gt *gt = h->gt;
-       struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+       struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
        struct drm_i915_gem_object *obj;
        struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        int err;
 
        obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-       if (IS_ERR(obj))
+       if (IS_ERR(obj)) {
+               i915_vm_put(vm);
                return ERR_CAST(obj);
+       }
 
        vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
        if (IS_ERR(vaddr)) {
                i915_gem_object_put(obj);
+               i915_vm_put(vm);
                return ERR_CAST(vaddr);
        }
 
        h->batch = vaddr;
 
        vma = i915_vma_instance(h->obj, vm, NULL);
-       if (IS_ERR(vma))
+       if (IS_ERR(vma)) {
+               i915_vm_put(vm);
                return ERR_CAST(vma);
+       }
 
        hws = i915_vma_instance(h->hws, vm, NULL);
-       if (IS_ERR(hws))
+       if (IS_ERR(hws)) {
+               i915_vm_put(vm);
                return ERR_CAST(hws);
+       }
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
-       if (err)
+       if (err) {
+               i915_vm_put(vm);
                return ERR_PTR(err);
+       }
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
+       i915_vm_put(vm);
        return err ? ERR_PTR(err) : rq;
 }
 
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        ctx = live_context(gt->i915, file);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out;
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        ctx = live_context(gt->i915, file);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out;
                return PTR_ERR(file);
 
        for (count = 0; count < ARRAY_SIZE(ctx); count++) {
-               mutex_lock(&engine->i915->drm.struct_mutex);
                ctx[count] = live_context(engine->i915, file);
-               mutex_unlock(&engine->i915->drm.struct_mutex);
                if (IS_ERR(ctx[count])) {
                        err = PTR_ERR(ctx[count]);
                        while (--count)
 {
        struct intel_gt *gt = arg;
        struct i915_gem_context *ctx;
+       struct i915_address_space *vm;
        struct drm_file *file;
        int err;
 
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       mutex_lock(&gt->i915->drm.struct_mutex);
        ctx = live_context(gt->i915, file);
-       mutex_unlock(&gt->i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out;
        }
 
        err = 0;
-       if (ctx->vm) /* aliasing == global gtt locking, covered above */
-               err = __igt_reset_evict_vma(gt, ctx->vm,
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       if (!i915_is_ggtt(vm)) {
+               /* aliasing == global gtt locking, covered above */
+               err = __igt_reset_evict_vma(gt, vm,
                                            evict_vma, EXEC_OBJECT_WRITE);
+       }
+       i915_vm_put(vm);
 
 out:
        mock_file_free(gt->i915, file);
 
        int err = 0;
 
        if (batch) {
-               vma = i915_vma_instance(batch, ctx->vm, NULL);
+               struct i915_address_space *vm;
+
+               vm = i915_gem_context_get_vm_rcu(ctx);
+               vma = i915_vma_instance(batch, vm, NULL);
+               i915_vm_put(vm);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
 
 
                rq = igt_spinner_create_request(spin, ce, MI_NOOP);
 
        intel_context_put(ce);
-       kernel_context_close(ctx);
 
        if (IS_ERR(rq)) {
                spin = NULL;
        if (err && spin)
                igt_spinner_end(spin);
 
+       kernel_context_close(ctx);
        return err;
 }
 
 static struct i915_vma *create_batch(struct i915_gem_context *ctx)
 {
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
        struct i915_vma *vma;
        int err;
 
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vma = i915_vma_instance(obj, ctx->vm, NULL);
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       vma = i915_vma_instance(obj, vm, NULL);
+       i915_vm_put(vm);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
                0xffff00ff,
                0xffffffff,
        };
+       struct i915_address_space *vm;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;
 
-       scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
+       i915_vm_put(vm);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);
 
                return 0;
 
        for (i = 0; i < ARRAY_SIZE(client); i++) {
+               struct i915_address_space *vm;
                struct i915_gem_context *c;
 
                c = kernel_context(i915);
                        goto err;
                }
 
-               client[i].scratch[0] = create_scratch(c->vm, 1024);
+               vm = i915_gem_context_get_vm_rcu(c);
+
+               client[i].scratch[0] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
+                       i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }
 
-               client[i].scratch[1] = create_scratch(c->vm, 1024);
+               client[i].scratch[1] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+                       i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }
 
                client[i].ctx = c;
+               i915_vm_put(vm);
        }
 
        for_each_engine(engine, i915, id) {
 
                                          struct i915_gem_context *ctx)
 {
        struct intel_vgpu_mm *mm = workload->shadow_mm;
-       struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
+       struct i915_ppgtt *ppgtt =
+               i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
        int i = 0;
 
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                        px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                }
        }
+
+       i915_vm_put(&ppgtt->vm);
 }
 
 static int
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
+       struct i915_ppgtt *ppgtt;
        enum intel_engine_id i;
        int ret;
 
-       mutex_lock(&i915->drm.struct_mutex);
-
        ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
-       if (IS_ERR(ctx)) {
-               ret = PTR_ERR(ctx);
-               goto out_unlock;
-       }
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
 
        i915_gem_context_set_force_single_submission(ctx);
 
-       i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+       ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+       i915_context_ppgtt_root_save(s, ppgtt);
 
        for_each_engine(engine, i915, i) {
                struct intel_context *ce;
        atomic_set(&s->running_workload_num, 0);
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
+       i915_vm_put(&ppgtt->vm);
        i915_gem_context_put(ctx);
-       mutex_unlock(&i915->drm.struct_mutex);
        return 0;
 
 out_shadow_ctx:
-       i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
+       i915_context_ppgtt_root_restore(s, ppgtt);
        for_each_engine(engine, i915, i) {
                if (IS_ERR(s->shadow[i]))
                        break;
                intel_context_unpin(s->shadow[i]);
                intel_context_put(s->shadow[i]);
        }
+       i915_vm_put(&ppgtt->vm);
        i915_gem_context_put(ctx);
-out_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
        return ret;
 }
 
 
                                struct drm_i915_private *i915)
 {
        struct file_stats kstats = {};
-       struct i915_gem_context *ctx;
+       struct i915_gem_context *ctx, *cn;
 
-       list_for_each_entry(ctx, &i915->contexts.list, link) {
+       spin_lock(&i915->gem.contexts.lock);
+       list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
                struct i915_gem_engines_iter it;
                struct intel_context *ce;
 
+               if (!kref_get_unless_zero(&ctx->ref))
+                       continue;
+
+               spin_unlock(&i915->gem.contexts.lock);
+
                for_each_gem_engine(ce,
                                    i915_gem_context_lock_engines(ctx), it) {
                        intel_context_lock_pinned(ce);
                i915_gem_context_unlock_engines(ctx);
 
                if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-                       struct file_stats stats = { .vm = ctx->vm, };
+                       struct file_stats stats = {
+                               .vm = rcu_access_pointer(ctx->vm),
+                       };
                        struct drm_file *file = ctx->file_priv->file;
                        struct task_struct *task;
                        char name[80];
 
                        print_file_stats(m, name, stats);
                }
+
+               spin_lock(&i915->gem.contexts.lock);
+               list_safe_reset_next(ctx, cn, link);
+               i915_gem_context_put(ctx);
        }
+       spin_unlock(&i915->gem.contexts.lock);
 
        print_file_stats(m, "[k]contexts", kstats);
 }
 static int i915_gem_object_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *i915 = node_to_i915(m->private);
-       int ret;
 
        seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
                   i915->mm.shrink_count,
 
        seq_putc(m, '\n');
 
-       ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
-       if (ret)
-               return ret;
-
        print_context_stats(m, i915);
-       mutex_unlock(&i915->drm.struct_mutex);
 
        return 0;
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       struct i915_gem_context *ctx;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
+       struct drm_i915_private *i915 = node_to_i915(m->private);
+       struct i915_gem_context *ctx, *cn;
 
-       list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+       spin_lock(&i915->gem.contexts.lock);
+       list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
                struct i915_gem_engines_iter it;
                struct intel_context *ce;
 
+               if (!kref_get_unless_zero(&ctx->ref))
+                       continue;
+
+               spin_unlock(&i915->gem.contexts.lock);
+
                seq_puts(m, "HW context ");
                if (ctx->pid) {
                        struct task_struct *task;
                i915_gem_context_unlock_engines(ctx);
 
                seq_putc(m, '\n');
-       }
 
-       mutex_unlock(&dev->struct_mutex);
+               spin_lock(&i915->gem.contexts.lock);
+               list_safe_reset_next(ctx, cn, link);
+               i915_gem_context_put(ctx);
+       }
+       spin_unlock(&i915->gem.contexts.lock);
 
        return 0;
 }
 
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(file);
        i915_gem_release(dev, file);
-       mutex_unlock(&dev->struct_mutex);
 
        kfree_rcu(file_priv, rcu);
 
 
        int audio_power_refcount;
        u32 audio_freq_cntrl;
 
-       struct {
-               struct mutex mutex;
-               struct list_head list;
-               struct llist_head free_list;
-               struct work_struct free_work;
-       } contexts;
-
        u32 fdi_rx_config;
 
        /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
 
        struct {
                struct notifier_block pm_notifier;
+
+               struct i915_gem_contexts {
+                       spinlock_t lock; /* locks list */
+                       struct list_head list;
+
+                       struct llist_head free_list;
+                       struct work_struct free_work;
+               } contexts;
        } gem;
 
        /* For i945gm vblank irq vs. C3 workaround */
 
                goto err_unlock;
        }
 
-       ret = i915_gem_contexts_init(dev_priv);
+       ret = i915_gem_init_contexts(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_scratch;
        }
 err_context:
        if (ret != -EIO)
-               i915_gem_contexts_fini(dev_priv);
+               i915_gem_driver_release__contexts(dev_priv);
 err_scratch:
        intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
 
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
-       mutex_lock(&dev_priv->drm.struct_mutex);
        intel_engines_cleanup(dev_priv);
-       i915_gem_contexts_fini(dev_priv);
+       i915_gem_driver_release__contexts(dev_priv);
        intel_gt_driver_release(&dev_priv->gt);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        intel_wa_list_free(&dev_priv->gt_wa_list);
 
 
        i915_gem_drain_freed_objects(dev_priv);
 
-       WARN_ON(!list_empty(&dev_priv->contexts.list));
+       WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
 }
 
 void i915_gem_init_mmio(struct drm_i915_private *i915)
 
        if (vm->has_read_only &&
            vm->i915->kernel_context &&
            vm->i915->kernel_context->vm) {
-               struct i915_address_space *clone = vm->i915->kernel_context->vm;
+               struct i915_address_space *clone =
+                       rcu_dereference_protected(vm->i915->kernel_context->vm,
+                                                 true); /* static */
 
                GEM_BUG_ON(!clone->has_read_only);
 
 
        };
 #undef ctx_flexeuN
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
-       int i;
+       struct i915_gem_context *ctx, *cn;
+       int i, err;
 
        for (i = 2; i < ARRAY_SIZE(regs); i++)
                regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
         * context. Contexts idle at the time of reconfiguration are not
         * trapped behind the barrier.
         */
-       list_for_each_entry(ctx, &i915->contexts.list, link) {
-               int err;
-
+       spin_lock(&i915->gem.contexts.lock);
+       list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
                if (ctx == i915->kernel_context)
                        continue;
 
+               if (!kref_get_unless_zero(&ctx->ref))
+                       continue;
+
+               spin_unlock(&i915->gem.contexts.lock);
+
                err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
-               if (err)
+               if (err) {
+                       i915_gem_context_put(ctx);
                        return err;
+               }
+
+               spin_lock(&i915->gem.contexts.lock);
+               list_safe_reset_next(ctx, cn, link);
+               i915_gem_context_put(ctx);
        }
+       spin_unlock(&i915->gem.contexts.lock);
 
        /*
         * After updating all other contexts, we need to modify ourselves.
         */
        for_each_uabi_engine(engine, i915) {
                struct intel_context *ce = engine->kernel_context;
-               int err;
 
                if (engine->class != RENDER_CLASS)
                        continue;
 
        count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
        memset(buf, 0, count);
 
-       ret = i915_mutex_lock_interruptible(&i915->drm);
-       if (ret)
-               return ret;
-
+       spin_lock(&i915->gem.contexts.lock);
        if (i915->l3_parity.remap_info[slice])
                memcpy(buf,
                       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
                       count);
-
-       mutex_unlock(&i915->drm.struct_mutex);
+       spin_unlock(&i915->gem.contexts.lock);
 
        return count;
 }
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
        int slice = (int)(uintptr_t)attr->private;
+       u32 *remap_info, *freeme = NULL;
        struct i915_gem_context *ctx;
-       u32 **remap_info;
        int ret;
 
        ret = l3_access_valid(i915, offset);
        if (count < sizeof(u32))
                return -EINVAL;
 
-       ret = i915_mutex_lock_interruptible(&i915->drm);
-       if (ret)
-               return ret;
+       remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+       if (!remap_info)
+               return -ENOMEM;
 
-       remap_info = &i915->l3_parity.remap_info[slice];
-       if (!*remap_info) {
-               *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
-               if (!*remap_info) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+       spin_lock(&i915->gem.contexts.lock);
+
+       if (i915->l3_parity.remap_info[slice]) {
+               freeme = remap_info;
+               remap_info = i915->l3_parity.remap_info[slice];
+       } else {
+               i915->l3_parity.remap_info[slice] = remap_info;
        }
 
        count = round_down(count, sizeof(u32));
-       memcpy(*remap_info + offset / sizeof(u32), buf, count);
+       memcpy(remap_info + offset / sizeof(u32), buf, count);
 
        /* NB: We defer the remapping until we switch to the context */
-       list_for_each_entry(ctx, &i915->contexts.list, link)
+       list_for_each_entry(ctx, &i915->gem.contexts.list, link)
                ctx->remap_slice |= BIT(slice);
 
+       spin_unlock(&i915->gem.contexts.lock);
+       kfree(freeme);
+
        /*
         * TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
        */
 
-       ret = count;
-out:
-       mutex_unlock(&i915->drm.struct_mutex);
-
-       return ret;
+       return count;
 }
 
 static const struct bin_attribute dpf_attrs = {
 
        TP_fast_assign(
                        __entry->dev = ctx->i915->drm.primary->index;
                        __entry->ctx = ctx;
-                       __entry->vm = ctx->vm;
+                       __entry->vm = rcu_access_pointer(ctx->vm);
        ),
 
        TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
 
                return PTR_ERR(file);
 
        err = -ENOMEM;
-       mutex_lock(&i915->drm.struct_mutex);
        ctx = live_context(i915, file);
        if (!IS_ERR(ctx))
                err = switch_to_context(i915, ctx);
-       mutex_unlock(&i915->drm.struct_mutex);
        if (err)
                goto out;
 
 
        pm_resume(i915);
 
-       mutex_lock(&i915->drm.struct_mutex);
        err = switch_to_context(i915, ctx);
-       mutex_unlock(&i915->drm.struct_mutex);
 out:
        mock_file_free(i915, file);
        return err;
                return PTR_ERR(file);
 
        err = -ENOMEM;
-       mutex_lock(&i915->drm.struct_mutex);
        ctx = live_context(i915, file);
        if (!IS_ERR(ctx))
                err = switch_to_context(i915, ctx);
-       mutex_unlock(&i915->drm.struct_mutex);
        if (err)
                goto out;
 
 
        pm_resume(i915);
 
-       mutex_lock(&i915->drm.struct_mutex);
        err = switch_to_context(i915, ctx);
-       mutex_unlock(&i915->drm.struct_mutex);
 out:
        mock_file_free(i915, file);
        return err;
 
                }
 
                count = 0;
-               mutex_lock(&i915->drm.struct_mutex);
                onstack_fence_init(&fence);
                do {
                        struct i915_request *rq;
                        count++;
                        err = 0;
                } while(1);
-               mutex_unlock(&i915->drm.struct_mutex);
-
                onstack_fence_fini(&fence);
                pr_info("Submitted %lu contexts/requests on %s\n",
                        count, engine->name);
 
                                     unsigned long end_time))
 {
        const u64 limit = totalram_pages() << PAGE_SHIFT;
+       struct i915_address_space *vm;
        struct i915_gem_context *ctx;
        IGT_TIMEOUT(end_time);
        int err;
        if (!ctx)
                return -ENOMEM;
 
-       err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       err = func(i915, vm, 0, min(vm->total, limit), end_time);
+       i915_vm_put(vm);
 
        mock_context_close(ctx);
        return err;
                goto out_unlock;
        }
 
-       vm = ctx->vm;
-       if (!vm)
-               goto out_unlock;
+       vm = i915_gem_context_get_vm_rcu(ctx);
+       if (i915_is_ggtt(vm))
+               goto out_vm;
 
        /* Create two pages; dummy we prefill the TLB, and intended */
        bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(bbe)) {
                err = PTR_ERR(bbe);
-               goto out_unlock;
+               goto out_vm;
        }
 
        batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
        i915_gem_object_put(act);
 out_put_bbe:
        i915_gem_object_put(bbe);
+out_vm:
+       i915_vm_put(vm);
 out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        mock_file_free(i915, file);
 
        struct intel_context *ce;
        int err = -EINVAL;
 
-       mutex_lock(&i915->drm.struct_mutex);
        ctx[0] = mock_context(i915, "A");
-       mutex_unlock(&i915->drm.struct_mutex);
 
        ce = i915_gem_context_get_engine(ctx[0], RCS0);
        GEM_BUG_ON(IS_ERR(ce));
        i915_request_get(request);
        i915_request_add(request);
 
-       mutex_lock(&i915->drm.struct_mutex);
        ctx[1] = mock_context(i915, "B");
-       mutex_unlock(&i915->drm.struct_mutex);
 
        ce = i915_gem_context_get_engine(ctx[1], RCS0);
        GEM_BUG_ON(IS_ERR(ce));
        }
 
        for (n = 0; n < t.ncontexts; n++) {
-               mutex_lock(&t.engine->i915->drm.struct_mutex);
                t.contexts[n] = mock_context(t.engine->i915, "mock");
-               mutex_unlock(&t.engine->i915->drm.struct_mutex);
                if (!t.contexts[n]) {
                        ret = -ENOMEM;
                        goto out_contexts;
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
        struct i915_gem_context *ctx = i915->kernel_context;
-       struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
+       struct i915_address_space *vm;
        struct i915_vma *vma;
        u32 *cmd;
        int err;
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
+       vm = i915_gem_context_get_vm_rcu(ctx);
        vma = i915_vma_instance(obj, vm, NULL);
+       i915_vm_put(vm);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }
 
        for (n = 0; n < t[0].ncontexts; n++) {
-               mutex_lock(&i915->drm.struct_mutex);
                t[0].contexts[n] = live_context(i915, file);
-               mutex_unlock(&i915->drm.struct_mutex);
                if (!t[0].contexts[n]) {
                        ret = -ENOMEM;
                        goto out_contexts;
 
 
 #include <linux/prime_numbers.h>
 
+#include "gem/i915_gem_context.h"
 #include "gem/selftests/mock_context.h"
 
 #include "i915_scatterlist.h"
 {
        bool ok = true;
 
-       if (vma->vm != ctx->vm) {
+       if (vma->vm != rcu_access_pointer(ctx->vm)) {
                pr_err("VMA created with wrong VM\n");
                ok = false;
        }
        list_for_each_entry(obj, objects, st_link) {
                for (pinned = 0; pinned <= 1; pinned++) {
                        list_for_each_entry(ctx, contexts, link) {
-                               struct i915_address_space *vm = ctx->vm;
+                               struct i915_address_space *vm;
                                struct i915_vma *vma;
                                int err;
 
+                               vm = i915_gem_context_get_vm_rcu(ctx);
                                vma = checked_vma_instance(obj, vm, NULL);
+                               i915_vm_put(vm);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);
 
 
 
        i915_gem_drain_workqueue(i915);
 
-       mutex_lock(&i915->drm.struct_mutex);
        for_each_engine(engine, i915, id)
                mock_engine_free(engine);
-       i915_gem_contexts_fini(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
+       i915_gem_driver_release__contexts(i915);
 
        intel_timelines_fini(i915);
 
        return i915;
 
 err_context:
-       i915_gem_contexts_fini(i915);
+       i915_gem_driver_release__contexts(i915);
 err_engine:
        mock_engine_free(i915->engine[RCS0]);
 err_unlock: