i915_gem_chipset_flush(dev);
 
 out:
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
 }
 
        }
 
 out_flush:
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
        i915_gem_object_ggtt_unpin(obj);
 out:
        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);
 
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
 }
 
        RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 
        i915_gem_request_assign(&obj->last_write_req, NULL);
-       intel_fb_obj_flush(obj, true);
+       intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
-       intel_fb_obj_flush(obj, false);
+       intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
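
Taken together, the i915_gem.c hunks above follow one rule: the origin names the path that dirtied the frontbuffer (direct CPU writes, writes through the GTT aperture, or command-streamer rendering on retire). A hypothetical helper, not part of the patch, restating that mapping against the existing I915_GEM_DOMAIN_* flags:

/*
 * Illustrative only: the patch hard-codes the origin at each call
 * site; this helper merely spells out the convention it follows.
 */
static enum fb_op_origin write_domain_to_origin(u32 write_domain)
{
	switch (write_domain) {
	case I915_GEM_DOMAIN_GTT:
		return ORIGIN_GTT;	/* writes through the aperture */
	case I915_GEM_DOMAIN_CPU:
		return ORIGIN_CPU;	/* direct (clflushed) CPU writes */
	default:
		return ORIGIN_CS;	/* GPU command-streamer rendering */
	}
}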
 
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
                                     unsigned frontbuffer_bits);
 void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits);
+                            unsigned frontbuffer_bits,
+                            enum fb_op_origin origin);
 void intel_frontbuffer_flip(struct drm_device *dev,
                            unsigned frontbuffer_bits);
-
 unsigned int intel_fb_align_height(struct drm_device *dev,
                                   unsigned int height,
                                   uint32_t pixel_format,
                                   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+                       enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
                              uint32_t pixel_format);
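
The new parameter reuses enum fb_op_origin, which the earlier invalidate-origin patch already added to intel_drv.h. For reference, at this point in the series the enum should look roughly like this (a sketch of the pre-existing definition, not part of this diff):

/* Assumed shape of the enum introduced by the invalidate-origin
 * patch; the flush side now reuses the same values. */
enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
};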
 
 
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some outstanding asynchronous rendering.
 *
  * Can be called without any locks held.
  */
 void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits)
+                            unsigned frontbuffer_bits,
+                            enum fb_op_origin origin)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
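Every direct caller now has to say where the dirty data came from. A minimal usage sketch, assuming the existing INTEL_FRONTBUFFER_PRIMARY() bit helper:

/* Sketch: flush pipe A's primary plane after a CPU write. */
intel_frontbuffer_flush(dev,
			INTEL_FRONTBUFFER_PRIMARY(PIPE_A),
			ORIGIN_CPU);
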
  * then any delayed flushes will be unblocked.
  */
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire)
+                       bool retire, enum fb_op_origin origin)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
                mutex_unlock(&dev_priv->fb_tracking.lock);
        }
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
+       intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
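
Threading origin down to intel_frontbuffer_flush() is what lets consumers filter flushes by source. A hypothetical consumer sketch, not in this patch (later PSR changes do something along these lines):

/* Hypothetical: a power-saving feature whose hardware already tracks
 * GPU rendering can ignore CS flushes and only react to the rest. */
static void example_psr_flush(unsigned frontbuffer_bits,
			      enum fb_op_origin origin)
{
	if (origin == ORIGIN_CS)
		return;		/* HW tracks command-streamer writes */

	/* ... otherwise exit the power-saving state ... */
}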
 
 /**
        dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
        mutex_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
+       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
        mutex_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
+       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }