void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                         struct sg_table *pages);
 
+static inline struct i915_gem_context *
+__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
+{
+       return idr_find(&file_priv->context_idr, id);
+}
+
 static inline struct i915_gem_context *
 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
        struct i915_gem_context *ctx;
 
-       lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
-       ctx = idr_find(&file_priv->context_idr, id);
-       if (!ctx)
-               return ERR_PTR(-ENOENT);
+       rcu_read_lock();
+       ctx = __i915_gem_context_lookup_rcu(file_priv, id);
+       if (ctx && !kref_get_unless_zero(&ctx->ref))
+               ctx = NULL;
+       rcu_read_unlock();
 
        return ctx;
 }
 
        list_del(&ctx->link);
 
        ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
-       kfree(ctx);
+       kfree_rcu(ctx, rcu);
 }
 
 static void contexts_free(struct drm_i915_private *i915)
        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
                return -ENOENT;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-       if (IS_ERR(ctx)) {
-               mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(ctx);
-       }
+       if (!ctx)
+               return -ENOENT;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               goto out;
 
        __destroy_hw_context(ctx, file_priv);
        mutex_unlock(&dev->struct_mutex);
 
-       DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
+out:
+       i915_gem_context_put(ctx);
-       return 0;
+       return ret;
 }
 
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
-       int ret;
-
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
+       int ret = 0;
 
        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-       if (IS_ERR(ctx)) {
-               mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(ctx);
-       }
+       if (!ctx)
+               return -ENOENT;
 
        args->size = 0;
        switch (args->param) {
                ret = -EINVAL;
                break;
        }
-       mutex_unlock(&dev->struct_mutex);
 
+       i915_gem_context_put(ctx);
        return ret;
 }
 
        struct i915_gem_context *ctx;
        int ret;
 
+       ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+       if (!ctx)
+               return -ENOENT;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               return ret;
-
-       ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-       if (IS_ERR(ctx)) {
-               mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(ctx);
-       }
+               goto out;
 
        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
        }
        mutex_unlock(&dev->struct_mutex);
 
+out:
+       i915_gem_context_put(ctx);
        return ret;
 }
 
        if (args->flags || args->pad)
                return -EINVAL;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
+       ret = -ENOENT;
+       rcu_read_lock();
+       ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
+       if (!ctx)
+               goto out;
 
-       ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
-       if (IS_ERR(ctx)) {
-               mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(ctx);
-       }
+       /*
+        * We opt for unserialised reads here. This may result in tearing
+        * in the extremely unlikely event of a GPU hang on this context
+        * as we are querying them. If we need that extra layer of protection,
+        * we should wrap the hangstats with a seqlock.
+        */
 
        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;
 
-       args->batch_active = ctx->guilty_count;
-       args->batch_pending = ctx->active_count;
-
-       mutex_unlock(&dev->struct_mutex);
+       args->batch_active = READ_ONCE(ctx->guilty_count);
+       args->batch_pending = READ_ONCE(ctx->active_count);
 
-       return 0;
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
        struct i915_gem_context *ctx;
 
        ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
-       if (unlikely(IS_ERR(ctx)))
-               return PTR_ERR(ctx);
+       if (unlikely(!ctx))
+               return -ENOENT;
 
        if (unlikely(i915_gem_context_is_banned(ctx))) {
                DRM_DEBUG("Context %u tried to submit while banned\n",
                          ctx->user_handle);
+               i915_gem_context_put(ctx);
                return -EIO;
        }
 
-       eb->ctx = i915_gem_context_get(ctx);
+       eb->ctx = ctx;
        eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
 
        eb->context_flags = 0;
        if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
                args->flags |= __EXEC_HAS_RELOC;
        eb.exec = exec;
-       eb.ctx = NULL;
        eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
        if (USES_FULL_PPGTT(eb.i915))
                eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
        if (eb_create(&eb))
                return -ENOMEM;
 
+       err = eb_select_context(&eb);
+       if (unlikely(err))
+               goto err_destroy;
+
        /*
         * Take a local wakeref for preparing to dispatch the execbuf as
         * we expect to access the hardware fairly frequently in the
         * 100ms.
         */
        intel_runtime_pm_get(eb.i915);
+
        err = i915_mutex_lock_interruptible(dev);
        if (err)
                goto err_rpm;
 
-       err = eb_select_context(&eb);
-       if (unlikely(err))
-               goto err_unlock;
-
        err = eb_relocate(&eb);
        if (err)
                /*
 err_vma:
        if (eb.exec)
                eb_release_vmas(&eb);
-       i915_gem_context_put(eb.ctx);
-err_unlock:
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
        intel_runtime_pm_put(eb.i915);
+       i915_gem_context_put(eb.ctx);
+err_destroy:
        eb_destroy(&eb);
        if (out_fence_fd != -1)
                put_unused_fd(out_fence_fd);