                         obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
                        if (obj->pin_count) /* check for potential scanout */
-                               intel_mark_fb_busy(obj);
+                               intel_mark_fb_busy(obj, ring);
                }
 
                trace_i915_gem_object_change_domain(obj, old_read, old_write);
 
        }
 }
 
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+                       struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_crtc *crtc;
                if (!crtc->fb)
                        continue;
 
-               if (to_intel_framebuffer(crtc->fb)->obj == obj)
-                       intel_increase_pllclock(crtc);
+               if (to_intel_framebuffer(crtc->fb)->obj != obj)
+                       continue;
+
+               intel_increase_pllclock(crtc);
+               if (ring && intel_fbc_enabled(dev))
+                       ring->fbc_dirty = true;
        }
 }
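The new per-ring flag only gets set here; nothing in this excerpt shows where it is consumed. As a minimal sketch (not the driver's actual code), a ring flush routine could check and clear it, with ring_flush_fbc() standing in as a hypothetical helper for whatever actually invalidates the FBC state:

	/*
	 * Illustrative only: one possible way a flush path could consume the
	 * new per-ring flag.  ring_flush_fbc() is a hypothetical helper, not
	 * an existing i915 function.
	 */
	static int example_consume_fbc_dirty(struct intel_ring_buffer *ring)
	{
		int ret = 0;

		if (ring->fbc_dirty) {
			ret = ring_flush_fbc(ring);	/* hypothetical */
			if (ret == 0)
				ring->fbc_dirty = false;
		}

		return ret;
	}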
 
                goto cleanup_pending;
 
        intel_disable_fbc(dev);
-       intel_mark_fb_busy(obj);
+       intel_mark_fb_busy(obj, NULL);
        mutex_unlock(&dev->struct_mutex);
 
        trace_i915_flip_request(intel_crtc->plane, obj);
 
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
 extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+                              struct intel_ring_buffer *ring);
 extern void intel_mark_idle(struct drm_device *dev);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern bool intel_is_dual_link_lvds(struct drm_device *dev);
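For reference, the two calling conventions implied by the new prototype, as used in the hunks above (a minimal sketch; "ring" is whichever ring just wrote the object):

	/* GPU write: record which ring dirtied a potential scanout buffer. */
	intel_mark_fb_busy(obj, ring);

	/* Paths with no ring to blame (e.g. the page-flip path above). */
	intel_mark_fb_busy(obj, NULL);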
 
         */
        u32 outstanding_lazy_request;
        bool gpu_caches_dirty;
+       bool fbc_dirty;
 
        wait_queue_head_t irq_queue;