struct drm_i915_gem_request **req_out);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file);
 
 static inline uint32_t
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
-                       struct drm_file *file,
                        struct drm_i915_gem_object *batch_obj,
                        bool flush_caches);
 #define i915_add_request(req) \
-       __i915_add_request(req, NULL, NULL, true)
+       __i915_add_request(req, NULL, true)
 #define i915_add_request_no_flush(req) \
-       __i915_add_request(req, NULL, NULL, false)
+       __i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
 
        return ret;
 }
 
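+/**
+ * i915_gem_request_add_to_client() - associate a request with a client
+ * @req: request to be associated
+ * @file: drm file the request originated from
+ *
+ * Links the request onto the client's request list and records the
+ * submitting process so the request can later be attributed back to it.
+ * The association is undone by i915_gem_request_remove_from_client().
+ *
+ * Returns 0 on success, or -EINVAL if either argument is NULL or the
+ * request is already associated with a client.
+ */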
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file)
+{
+       struct drm_i915_file_private *file_priv;
+
+       WARN_ON(!req || !file || req->file_priv);
+
+       if (!req || !file)
+               return -EINVAL;
+
+       if (req->file_priv)
+               return -EINVAL;
+
+       file_priv = file->driver_priv;
+
+       spin_lock(&file_priv->mm.lock);
+       req->file_priv = file_priv;
+       list_add_tail(&req->client_list, &file_priv->mm.request_list);
+       spin_unlock(&file_priv->mm.lock);
+
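+       /*
+        * Take a reference on the submitting task's pid so the request can
+        * be attributed back to it later on.
+        */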
+       req->pid = get_pid(task_pid(current));
+
+       return 0;
+}
+
 static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
+
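+       /* Balance the get_pid() done in i915_gem_request_add_to_client(). */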
+       put_pid(request->pid);
+       request->pid = NULL;
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
 
-       put_pid(request->pid);
-
        i915_gem_request_unreference(request);
 }
 
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
 void __i915_add_request(struct drm_i915_gem_request *request,
-                       struct drm_file *file,
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
 {
 
        request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
-       request->file_priv = NULL;
-
-       if (file) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-
-               spin_lock(&file_priv->mm.lock);
-               request->file_priv = file_priv;
-               list_add_tail(&request->client_list,
-                             &file_priv->mm.request_list);
-               spin_unlock(&file_priv->mm.lock);
-
-               request->pid = get_pid(task_pid(current));
-       }
 
        trace_i915_gem_request_add(request);
 
                                                 typeof(*req), ref);
        struct intel_context *ctx = req->ctx;
 
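+       /*
+        * The request may be freed without ever having been retired (e.g.
+        * when it is cancelled), so drop any client association here too.
+        */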
+       if (req->file_priv)
+               i915_gem_request_remove_from_client(req);
+
        if (ctx) {
                if (i915.enable_execlists) {
                        struct intel_engine_cs *ring = req->ring;
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
+               /*
+                * Note that the request might not have been submitted yet,
+                * in which case emitted_jiffies will be zero.
+                */
+               if (!request->emitted_jiffies)
+                       continue;
+
                target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
        params->ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       __i915_add_request(params->request, params->file, params->batch_obj, true);
+       __i915_add_request(params->request, params->batch_obj, true);
 }
 
 static int
        if (ret)
                goto err_batch_unpin;
 
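+       /*
+        * Associate the request with the client now, at creation time,
+        * rather than when it is finally submitted in __i915_add_request().
+        */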
+       ret = i915_gem_request_add_to_client(params->request, file);
+       if (ret)
+               goto err_batch_unpin;
+
        /*
         * Save assorted stuff away to pass through to *_submission().
         * NB: This data should be 'persistent' and not local as it will