int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
 
        /* XXX: RCS is the only one to auto invalidate the TLBs? */
        if (ring->id != RCS) {
-               ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
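Not visible in these hunks, but implied by the converted call sites above: the flush vfunc declaration in intel_ringbuffer.h has to change to match, presumably to something like:

	int		(*flush)(struct drm_i915_gem_request *req,
				 u32 invalidate_domains,
				 u32 flush_domains);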
 
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-           TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
-           TP_ARGS(ring, invalidate, flush),
+           TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+           TP_ARGS(req, invalidate, flush),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, ring)
                             __field(u32, invalidate)
                             __field(u32, flush)
                             ),
 
            TP_fast_assign(
-                          __entry->dev = ring->dev->primary->index;
-                          __entry->ring = ring->id;
+                          __entry->dev = req->ring->dev->primary->index;
+                          __entry->ring = req->ring->id;
                           __entry->invalidate = invalidate;
                           __entry->flush = flush;
                           ),
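Keying the event off the request means the dev and ring fields are now derived through req->ring inside TP_fast_assign, while callers just pass the request straight through, as the cache-flush hunks at the end of this patch do:

	trace_i915_gem_ring_flush(req, invalidate_domains, flush_domains);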
 
 }
 
 static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 cmd;
        int ret;
 
 }
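Each implementation below gets the same mechanical treatment: take req, recover the engine with a ring = req->ring local, and leave the rest of the body untouched. A simplified, illustrative sketch of the resulting shape (the full gen2 body is elided above; this is not the exact code):

static int
example_ring_flush(struct drm_i915_gem_request *req,
		   u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	/* intel_ring_begin() still takes the engine at this point in
	 * the series; only ->flush() is converted by this patch. */
	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}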
 
 static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;
 }
 
 static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
-                         u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+                      u32 invalidate_domains, u32 flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 }
 
 static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 }
 
 static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 }
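The gen6 through gen8 render variants all translate the invalidate/flush domains into PIPE_CONTROL flags and post the result to a scratch page, which is why each keeps the scratch_addr local. The emission is roughly (gen6-style, simplified):

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);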
 
 static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
               u32     invalidate_domains,
               u32     flush_domains)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(ring, 2);
                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                               u32 invalidate, u32 flush)
 {
+       struct intel_engine_cs *ring = req->ring;
        uint32_t cmd;
        int ret;
 
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        uint32_t cmd;
        int ret;
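The BSD and blitter rings above flush with MI_FLUSH_DW rather than PIPE_CONTROL, but the req->ring conversion is identical. The emission side is roughly as follows (simplified; the real code also handles gen8 addressing and per-ring invalidate bits):

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);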
        if (!ring->gpu_caches_dirty)
                return 0;
 
-       ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+       ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+       trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
        ring->gpu_caches_dirty = false;
        return 0;
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+       trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
        ring->gpu_caches_dirty = false;
        return 0;
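Finally, since the bodies of the two cache-maintenance helpers above reference both req and ring, the enclosing functions (presumably intel_ring_flush_all_caches() and intel_ring_invalidate_all_caches()) must themselves now take the request. The implied shape of the first one:

int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}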