Currently, we remove the request from the per-file request list for
throttling and retirement under a dedicated spinlock, but insertion is
governed by struct_mutex. Both paths need to take the same lock so that
the retirement/insertion of neighbouring requests (at the tail) doesn't
break the list.
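
To illustrate the hazard, here is a minimal userspace analogue (all
names hypothetical, not the driver code): when the writer that appends
at the tail and the writer that unlinks the tail hold *different*
locks, their link updates can interleave and corrupt the list.

#include <pthread.h>

struct node { struct node *prev, *next; };

static struct node head = { &head, &head };
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* "struct_mutex" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* "mm.lock" */

static void add_tail(struct node *n)
{
	pthread_mutex_lock(&lock_a);
	n->prev = head.prev;	/* may pick up a node that the ... */
	n->next = &head;
	head.prev->next = n;	/* ... lock_b holder is concurrently unlinking */
	head.prev = n;
	pthread_mutex_unlock(&lock_a);
}

static void del(struct node *n)
{
	pthread_mutex_lock(&lock_b);
	n->prev->next = n->next;	/* races with add_tail() when n is the tail */
	n->next->prev = n->prev;
	pthread_mutex_unlock(&lock_b);
}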
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190820080907.4665-1-chris@chris-wilson.co.uk
 
 static void
 add_to_client(struct i915_request *rq, struct drm_file *file)
 {
-       rq->file_priv = file->driver_priv;
-       list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+
+       rq->file_priv = file_priv;
+
+       spin_lock(&file_priv->mm.lock);
+       list_add_tail(&rq->client_link, &file_priv->mm.request_list);
+       spin_unlock(&file_priv->mm.lock);
 }
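
With insertion now serialised by file_priv->mm.lock, the throttle path
can walk the client list under the same lock. Roughly, a sketch
assuming the pre-existing shape of the throttle loop (field names
emitted_jiffies, recent_enough and target are assumptions here):

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;
		target = request;
	}
	if (target)
		i915_request_get(target);
	spin_unlock(&file_priv->mm.lock);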
 
 static int eb_submit(struct i915_execbuffer *eb)
 
 }
 
 static inline void
-i915_request_remove_from_client(struct i915_request *request)
+remove_from_client(struct i915_request *request)
 {
        struct drm_i915_file_private *file_priv;
 
-       file_priv = request->file_priv;
+       file_priv = READ_ONCE(request->file_priv);
        if (!file_priv)
                return;
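
The READ_ONCE() guards against the back-pointer being cleared
concurrently by the file-close path; the authoritative check is redone
under the lock. The elided remainder of the function (unchanged by this
patch) unlinks under the per-file spinlock, something like this sketch,
assuming the retained code's shape:

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);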
 
 
        local_irq_enable();
 
-       i915_request_remove_from_client(rq);
+       remove_from_client(rq);
        list_del(&rq->link);
 
        intel_context_exit(rq->hw_context);
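
For completeness, the reason file_priv can vanish underneath the
retirement path above: on file close the driver walks the client list
under the same spinlock and severs each request's back-pointer. A
sketch, assuming the release path's existing shape:

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

A concurrent remove_from_client() then either observes NULL via
READ_ONCE() and returns early, or takes the lock, re-checks
request->file_priv, and skips the list_del().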