* an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+       struct kref ref;
+
        /** On which ring this request was generated */
        struct intel_engine_cs *ring;
 
        struct list_head client_list;
 };
 
+void i915_gem_request_free(struct kref *req_ref);
+
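+/*
+ * Requests are reference counted; the helpers below manage the count.
+ * i915_gem_request_free() is the kref release callback and runs once the
+ * last reference is dropped.
+ */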
+static inline void
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+       kref_get(&req->ref);
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+       kref_put(&req->ref, i915_gem_request_free);
+}
+
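+/*
+ * Replace the request pointed to by 'pdst' with 'src', updating the
+ * reference counts: the new reference is taken before the old one is
+ * dropped so that 'src == *pdst' is handled safely.
+ */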
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+                                          struct drm_i915_gem_request *src)
+{
+       if (src)
+               i915_gem_request_reference(src);
+
+       if (*pdst)
+               i915_gem_request_unreference(*pdst);
+
+       *pdst = src;
+}
+
 struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;
        struct drm_file *file;
 
 
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
-       struct intel_context *ctx = request->ctx;
-
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);
 
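+       /* Drop the reference; the request is freed once no user holds one. */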
+       i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+       struct drm_i915_gem_request *req = container_of(req_ref,
+                                                typeof(*req), ref);
+       struct intel_context *ctx = req->ctx;
+
        if (ctx) {
                if (i915.enable_execlists) {
-                       struct intel_engine_cs *ring = request->ring;
+                       struct intel_engine_cs *ring = req->ring;
 
                        if (ctx != ring->default_context)
                                intel_lr_context_unpin(ring, ctx);
                }
+
                i915_gem_context_unreference(ctx);
        }
-       kfree(request);
+
+       kfree(req);
 }
 
 struct drm_i915_gem_request *
        }
 
        /* These may not have been flushed before the reset; do so now */
-       kfree(ring->preallocated_lazy_request);
-       ring->preallocated_lazy_request = NULL;
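+       /* Assigning NULL drops the reference rather than freeing outright. */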
+       i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
        ring->outstanding_lazy_seqno = 0;
 }
 
 
                }
        }
 
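+       /* New requests start life with a single reference held. */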
+       kref_init(&request->ref);
+
        ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
        if (ret) {
                intel_lr_context_unpin(ring, ctx);
 
        intel_logical_ring_stop(ring);
        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-       ring->preallocated_lazy_request = NULL;
+       i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
        ring->outstanding_lazy_seqno = 0;
 
        if (ring->cleanup)
 
 
        intel_unpin_ringbuffer_obj(ringbuf);
        intel_destroy_ringbuffer_obj(ringbuf);
-       ring->preallocated_lazy_request = NULL;
+       i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
        ring->outstanding_lazy_seqno = 0;
 
        if (ring->cleanup)
        if (request == NULL)
                return -ENOMEM;
 
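+       /* As above, the request starts life with a single reference. */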
+       kref_init(&request->ref);
+
        ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
        if (ret) {
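+               /* No other reference can exist yet, so plain kfree() is safe. */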
                kfree(request);