obj->base.write_domain);
        for_each_engine_id(engine, dev_priv, id)
                seq_printf(m, "%x ",
-                          i915_gem_active_get_seqno(&obj->last_read[id]));
+                          i915_gem_active_get_seqno(&obj->last_read[id],
+                                                    &obj->base.dev->struct_mutex));
        seq_printf(m, "] %x %x%s%s%s",
-                  i915_gem_active_get_seqno(&obj->last_write),
-                  i915_gem_active_get_seqno(&obj->last_fence),
+                  i915_gem_active_get_seqno(&obj->last_write,
+                                            &obj->base.dev->struct_mutex),
+                  i915_gem_active_get_seqno(&obj->last_fence,
+                                            &obj->base.dev->struct_mutex),
                   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
                seq_printf(m, " (%s mappable)", s);
        }
 
-       engine = i915_gem_active_get_engine(&obj->last_write);
+       engine = i915_gem_active_get_engine(&obj->last_write,
+                                           &obj->base.dev->struct_mutex);
        if (engine)
                seq_printf(m, " (%s)", engine->name);
 
 
        int ret, i;
 
        if (readonly) {
-               request = i915_gem_active_peek(&obj->last_write);
+               request = i915_gem_active_peek(&obj->last_write,
+                                              &obj->base.dev->struct_mutex);
                if (request) {
                        ret = i915_wait_request(request);
                        if (ret)
                                return ret;
 
                        i = request->engine->id;
-                       if (i915_gem_active_peek(&obj->last_read[i]) == request)
+                       if (i915_gem_active_peek(&obj->last_read[i],
+                                                &obj->base.dev->struct_mutex) == request)
                                i915_gem_object_retire__read(obj, i);
                        else
                                i915_gem_object_retire__write(obj);
                }
        } else {
                for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       request = i915_gem_active_peek(&obj->last_read[i]);
+                       request = i915_gem_active_peek(&obj->last_read[i],
+                                                      &obj->base.dev->struct_mutex);
                        if (!request)
                                continue;
 
 {
        int idx = req->engine->id;
 
-       if (i915_gem_active_peek(&obj->last_read[idx]) == req)
+       if (i915_gem_active_peek(&obj->last_read[idx],
+                                &obj->base.dev->struct_mutex) == req)
                i915_gem_object_retire__read(obj, idx);
-       else if (i915_gem_active_peek(&obj->last_write) == req)
+       else if (i915_gem_active_peek(&obj->last_write,
+                                     &obj->base.dev->struct_mutex) == req)
                i915_gem_object_retire__write(obj);
 
        if (!i915_reset_in_progress(&req->i915->gpu_error))
        if (readonly) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_active_get(&obj->last_write);
+               req = i915_gem_active_get(&obj->last_write,
+                                         &obj->base.dev->struct_mutex);
                if (req == NULL)
                        return 0;
 
                for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
 
-                       req = i915_gem_active_get(&obj->last_read[i]);
+                       req = i915_gem_active_get(&obj->last_read[i],
+                                                 &obj->base.dev->struct_mutex);
                        if (req == NULL)
                                continue;
 
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(!i915_gem_active_isset(&obj->last_write));
-       GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));
+       GEM_BUG_ON(!(obj->active &
+                    intel_engine_flag(i915_gem_active_get_engine(&obj->last_write,
+                                                                 &obj->base.dev->struct_mutex))));
 
        i915_gem_active_set(&obj->last_write, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
        list_del_init(&obj->engine_list[idx]);
        i915_gem_active_set(&obj->last_read[idx], NULL);
 
-       engine = i915_gem_active_get_engine(&obj->last_write);
+       engine = i915_gem_active_get_engine(&obj->last_write,
+                                           &obj->base.dev->struct_mutex);
        if (engine && engine->id == idx)
                i915_gem_object_retire__write(obj);
 
                                       struct drm_i915_gem_object,
                                       engine_list[engine->id]);
 
-               if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
+               if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
+                                                     &obj->base.dev->struct_mutex)->list))
                        break;
 
                i915_gem_object_retire__read(obj, engine->id);
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_active_peek(&obj->last_read[i]);
+               req = i915_gem_active_peek(&obj->last_read[i],
+                                          &obj->base.dev->struct_mutex);
                if (req == NULL)
                        continue;
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_active_get(&obj->last_read[i]);
+               req = i915_gem_active_get(&obj->last_read[i],
+                                         &obj->base.dev->struct_mutex);
                if (req)
                        requests[n++] = req;
        }
        if (readonly) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_active_peek(&obj->last_write);
+               req = i915_gem_active_peek(&obj->last_write,
+                                          &obj->base.dev->struct_mutex);
                if (req)
                        requests[n++] = req;
        } else {
                for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
 
-                       req = i915_gem_active_peek(&obj->last_read[i]);
+                       req = i915_gem_active_peek(&obj->last_read[i],
+                                                  &obj->base.dev->struct_mutex);
                        if (req)
                                requests[n++] = req;
                }
                int i;
 
                for (i = 0; i < I915_NUM_ENGINES; i++) {
-                       req = i915_gem_active_peek(&obj->last_read[i]);
+                       req = i915_gem_active_peek(&obj->last_read[i],
+                                                  &obj->base.dev->struct_mutex);
                        if (req)
                                args->busy |= 1 << (16 + req->engine->exec_id);
                }
-               req = i915_gem_active_peek(&obj->last_write);
+               req = i915_gem_active_peek(&obj->last_write,
+                                          &obj->base.dev->struct_mutex);
                if (req)
                        args->busy |= req->engine->exec_id;
        }
 
 {
        int ret;
 
-       ret = i915_gem_active_wait(&obj->last_fence);
+       ret = i915_gem_active_wait(&obj->last_fence,
+                                  &obj->base.dev->struct_mutex);
        if (ret)
                return ret;
 
 
        i915_gem_request_assign(&active->request, request);
 }
 
+static inline struct drm_i915_gem_request *
+__i915_gem_active_peek(const struct i915_gem_active *active)
+{
+       return active->request;
+}
+
 /**
  * i915_gem_active_peek - report the request being monitored
  * @active - the active tracker
  * caller must hold struct_mutex.
  */
 static inline struct drm_i915_gem_request *
-i915_gem_active_peek(const struct i915_gem_active *active)
+i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
        return active->request;
 }
  * if the active tracker is idle. The caller must hold struct_mutex.
  */
 static inline struct drm_i915_gem_request *
-i915_gem_active_get(const struct i915_gem_active *active)
+i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 {
        struct drm_i915_gem_request *request;
 
-       request = i915_gem_active_peek(active);
+       request = i915_gem_active_peek(active, mutex);
        if (!request || i915_gem_request_completed(request))
                return NULL;
 
  * the caller to hold struct_mutex (but that can be relaxed if desired).
  */
 static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active)
+i915_gem_active_is_idle(const struct i915_gem_active *active,
+                       struct mutex *mutex)
 {
        struct drm_i915_gem_request *request;
 
-       request = i915_gem_active_peek(active);
+       request = i915_gem_active_peek(active, mutex);
        if (!request || i915_gem_request_completed(request))
                return true;
 
  * retired first, see i915_gem_active_retire().
  */
 static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active)
+i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 {
        struct drm_i915_gem_request *request;
 
-       request = i915_gem_active_peek(active);
+       request = i915_gem_active_peek(active, mutex);
        if (!request)
                return 0;
 
  * tracker is idle, the function returns immediately.
  */
 static inline int __must_check
-i915_gem_active_retire(const struct i915_gem_active *active)
+i915_gem_active_retire(const struct i915_gem_active *active,
+                      struct mutex *mutex)
 {
-       return i915_gem_active_wait(active);
+       return i915_gem_active_wait(active, mutex);
 }
 
 /* Convenience functions for peeking at state inside active's request whilst
  */
 
 static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active)
+i915_gem_active_get_seqno(const struct i915_gem_active *active,
+                         struct mutex *mutex)
 {
-       return i915_gem_request_get_seqno(i915_gem_active_peek(active));
+       return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
 }
 
 static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active)
+i915_gem_active_get_engine(const struct i915_gem_active *active,
+                          struct mutex *mutex)
 {
-       return i915_gem_request_get_engine(i915_gem_active_peek(active));
+       return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
 }
 
 #define for_each_active(mask, idx) \
 
                        }
 
                        obj->fence_dirty =
-                               !i915_gem_active_is_idle(&obj->last_fence) ||
+                               !i915_gem_active_is_idle(&obj->last_fence,
+                                                        &dev->struct_mutex) ||
                                obj->fence_reg != I915_FENCE_REG_NONE;
 
                        obj->tiling_mode = args->tiling_mode;
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_active_get(&obj->last_read[i]);
+               req = i915_gem_active_get(&obj->last_read[i],
+                                         &obj->base.dev->struct_mutex);
                if (req)
                        requests[n++] = req;
        }
 
 #define i915_error_ggtt_object_create(dev_priv, src) \
        i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
 
+/* The error capture is special as it tries to run underneath the normal
+ * locking rules - so we use the raw version of the i915_gem_active lookup.
+ */
+static inline uint32_t
+__active_get_seqno(struct i915_gem_active *active)
+{
+       return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+}
+
+static inline int
+__active_get_engine_id(struct i915_gem_active *active)
+{
+       struct intel_engine_cs *engine;
+
+       engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
+       return engine ? engine->id : -1;
+}
+
 static void capture_bo(struct drm_i915_error_buffer *err,
                       struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct intel_engine_cs *engine;
        int i;
 
        err->size = obj->base.size;
        err->name = obj->base.name;
+
        for (i = 0; i < I915_NUM_ENGINES; i++)
-               err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]);
-       err->wseqno = i915_gem_active_get_seqno(&obj->last_write);
+               err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
+       err->wseqno = __active_get_seqno(&obj->last_write);
+       err->engine = __active_get_engine_id(&obj->last_write);
+
        err->gtt_offset = vma->node.start;
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
        err->cache_level = obj->cache_level;
-
-       engine = i915_gem_active_get_engine(&obj->last_write);
-       err->engine = engine ? engine->id : -1;
 }
 
 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 
        if (resv && !reservation_object_test_signaled_rcu(resv, false))
                return true;
 
-       return engine != i915_gem_active_get_engine(&obj->last_write);
+       return engine != i915_gem_active_get_engine(&obj->last_write,
+                                                   &obj->base.dev->struct_mutex);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                engine = &dev_priv->engine[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
-               engine = i915_gem_active_get_engine(&obj->last_write);
+               engine = i915_gem_active_get_engine(&obj->last_write,
+                                                   &obj->base.dev->struct_mutex);
                if (engine == NULL || engine->id != RCS)
                        engine = &dev_priv->engine[BCS];
        } else {
        if (mmio_flip) {
                INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
-               work->flip_queued_req = i915_gem_active_get(&obj->last_write);
+               work->flip_queued_req = i915_gem_active_get(&obj->last_write,
+                                                           &obj->base.dev->struct_mutex);
                schedule_work(&work->mmio_work);
        } else {
                request = i915_gem_request_alloc(engine, engine->last_context);
 
        if (ret == 0) {
                to_intel_plane_state(new_state)->wait_req =
-                       i915_gem_active_get(&obj->last_write);
+                       i915_gem_active_get(&obj->last_write,
+                                           &obj->base.dev->struct_mutex);
        }
 
        return ret;