/* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+       return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
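
The new single-parameter interface replaces the old (invalidate_domains, flush_domains) pair with a small bitmask. The flag definitions are not part of this excerpt; a minimal sketch, assuming they sit next to the emit_flush() vfunc in intel_ringbuffer.h:

	int	(*emit_flush)(struct drm_i915_gem_request *request,
			      u32 mode);
	/* assumed definitions -- not shown in the hunks here */
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)

With EMIT_BARRIER assumed to be the union of the two bits, a barrier request takes both the EMIT_INVALIDATE and EMIT_FLUSH branches in the per-engine implementations below.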
        if (w->count == 0)
                return 0;
 
-       ret = req->engine->emit_flush(req,
-                                     I915_GEM_GPU_DOMAINS,
-                                     I915_GEM_GPU_DOMAINS);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
 
        intel_ring_advance(ring);
 
-       ret = req->engine->emit_flush(req,
-                                     I915_GEM_GPU_DOMAINS,
-                                     I915_GEM_GPU_DOMAINS);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
        I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
-                          u32 invalidate_domains,
-                          u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
        struct intel_ring *ring = request->ring;
        u32 cmd;
         */
        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
-       if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+       if (mode & EMIT_INVALIDATE) {
                cmd |= MI_INVALIDATE_TLB;
                if (request->engine->id == VCS)
                        cmd |= MI_INVALIDATE_BSD;
 }
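
Caller-side the conversion is mechanical; a hypothetical example (not one of the hunks shown) of how each old domain pair would map onto the assumed mode bits:

	/* hypothetical callers, for illustration only */
	ret = engine->emit_flush(req, EMIT_INVALIDATE);	/* was (I915_GEM_GPU_DOMAINS, 0) */
	ret = engine->emit_flush(req, EMIT_FLUSH);	/* was (0, I915_GEM_GPU_DOMAINS) */
	ret = engine->emit_flush(req, EMIT_BARRIER);	/* was (I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS) */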
 
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
-                                 u32 invalidate_domains,
-                                 u32 flush_domains)
+                                 u32 mode)
 {
        struct intel_ring *ring = request->ring;
        struct intel_engine_cs *engine = request->engine;
 
        flags |= PIPE_CONTROL_CS_STALL;
 
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
 
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 
 }
 
 static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32      invalidate_domains,
-                      u32      flush_domains)
+gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        u32 cmd;
        int ret;
 
        cmd = MI_FLUSH;
-       if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
-               cmd |= MI_NO_WRITE_FLUSH;
 
-       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+       if (mode & EMIT_INVALIDATE)
                cmd |= MI_READ_FLUSH;
 
        ret = intel_ring_begin(req, 2);
 }
 
 static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32      invalidate_domains,
-                      u32      flush_domains)
+gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        u32 cmd;
         */
 
        cmd = MI_FLUSH;
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                cmd |= MI_EXE_FLUSH;
                if (IS_G4X(req->i915) || IS_GEN5(req->i915))
                        cmd |= MI_INVALIDATE_ISP;
 }
 
 static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        u32 scratch_addr =
         * number of bits based on the write domains has little performance
         * impact.
         */
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /*
                 */
                flags |= PIPE_CONTROL_CS_STALL;
        }
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 }
 
 static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32 invalidate_domains, u32 flush_domains)
+gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        u32 scratch_addr =
         * number of bits based on the write domains has little performance
         * impact.
         */
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 }
 
 static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req,
-                      u32 invalidate_domains, u32 flush_domains)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        u32 flags = 0;
 
        flags |= PIPE_CONTROL_CS_STALL;
 
-       if (flush_domains) {
+       if (mode & EMIT_FLUSH) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
-       if (invalidate_domains) {
+       if (mode & EMIT_INVALIDATE) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        if (w->count == 0)
                return 0;
 
-       ret = req->engine->emit_flush(req,
-                                     I915_GEM_GPU_DOMAINS,
-                                     I915_GEM_GPU_DOMAINS);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
 
        intel_ring_advance(ring);
 
-       ret = req->engine->emit_flush(req,
-                                     I915_GEM_GPU_DOMAINS,
-                                     I915_GEM_GPU_DOMAINS);
+       ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;
 
 }
 
 static int
-bsd_ring_flush(struct drm_i915_gem_request *req,
-              u32     invalidate_domains,
-              u32     flush_domains)
+bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        int ret;
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
-                              u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        uint32_t cmd;
         * operation is complete. This bit is only valid when the
         * Post-Sync Operation field is a value of 1h or 3h."
         */
-       if (invalidate & I915_GEM_GPU_DOMAINS)
+       if (mode & EMIT_INVALIDATE)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
        intel_ring_emit(ring, cmd);
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct drm_i915_gem_request *req,
-                          u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
        struct intel_ring *ring = req->ring;
        uint32_t cmd;
         * operation is complete. This bit is only valid when the
         * Post-Sync Operation field is a value of 1h or 3h."
         */
-       if (invalidate & I915_GEM_DOMAIN_RENDER)
+       if (mode & EMIT_INVALIDATE)
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring,