return ret;
 }
 
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-       u32 plane, flip_mask;
-       int ret;
-
-       /* Check for any pending flips. As we only maintain a flip queue depth
-        * of 1, we can simply insert a WAIT for the next display flip prior
-        * to executing the batch and avoid stalling the CPU.
-        */
-
-       for (plane = 0; flips >> plane; plane++) {
-               if (((flips >> plane) & 1) == 0)
-                       continue;
-
-               if (plane)
-                       flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-               else
-                       flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-               ret = intel_ring_begin(ring, 2);
-               if (ret)
-                       return ret;
-
-               intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
-       }
-
-       return 0;
-}
-
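For context on what is being deleted: the function above walked a per-plane bitmask (bit 0 for plane A, bit 1 for plane B) and emitted an MI_WAIT_FOR_EVENT into the ring for each pending flip, so the GPU rather than the CPU stalled until the display flip happened. A minimal userspace sketch of that same bitmask walk, where the printf stands in for the ring emission and everything outside the loop shape is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the plane ? PLANE_B : PLANE_A selection in the deleted code. */
    static const char *wait_event_for(unsigned int plane)
    {
            return plane ? "MI_WAIT_FOR_PLANE_B_FLIP" : "MI_WAIT_FOR_PLANE_A_FLIP";
    }

    int main(void)
    {
            uint32_t flips = 0x3;   /* flips pending on both planes */
            unsigned int plane;

            /* Same loop shape as the removed function. */
            for (plane = 0; flips >> plane; plane++) {
                    if (((flips >> plane) & 1) == 0)
                            continue;
                    printf("emit MI_WAIT_FOR_EVENT | %s\n", wait_event_for(plane));
            }
            return 0;
    }

The loop condition flips >> plane stops the scan as soon as no set bit remains at or above the current plane index, so an empty mask costs nothing.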
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
 {
        struct drm_i915_gem_object *obj;
        uint32_t flush_domains = 0;
-       uint32_t flips = 0;
        int ret;
 
        list_for_each_entry(obj, objects, exec_list) {
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        i915_gem_clflush_object(obj);
 
-               if (obj->base.pending_write_domain)
-                       flips |= atomic_read(&obj->pending_flip);
-
                flush_domains |= obj->base.write_domain;
        }
 
-       if (flips) {
-               ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-               if (ret)
-                       return ret;
-       }
-
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                i915_gem_chipset_flush(ring->dev);
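The surviving loop in i915_gem_execbuffer_move_to_gpu is a gather-then-act pattern: each object's write_domain is OR'd into flush_domains during the walk, and the comparatively expensive chipset flush is issued at most once afterwards. A compact sketch of that pattern, assuming made-up domain flags and object layout (only the accumulate-then-flush shape comes from the hunk above):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DOMAIN_CPU (1u << 0)    /* hypothetical stand-in for I915_GEM_DOMAIN_CPU */
    #define DOMAIN_GTT (1u << 1)    /* second made-up domain, for illustration */

    struct gem_obj {
            uint32_t write_domain;
    };

    int main(void)
    {
            struct gem_obj objs[] = { { DOMAIN_CPU }, { 0 }, { DOMAIN_GTT } };
            uint32_t flush_domains = 0;
            size_t i;

            /* Gather: accumulate every object's dirty write domain ... */
            for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
                    flush_domains |= objs[i].write_domain;

            /* ... then act once: one flush per domain, not one per object. */
            if (flush_domains & DOMAIN_CPU)
                    printf("chipset flush: push CPU caches to memory\n");
            return 0;
    }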
 
        obj = work->old_fb_obj;
 
-       atomic_clear_mask(1 << intel_crtc->plane,
-                         &obj->pending_flip.counter);
        wake_up(&dev_priv->pending_flip_queue);
 
        queue_work(dev_priv->wq, &work->work);
 
        work->enable_stall_check = true;
 
-       /* Block clients from rendering to the new back buffer until
-        * the flip occurs and the object is no longer visible.
-        */
-       atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        atomic_inc(&intel_crtc->unpin_work_count);
 
        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
-       atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
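The deleted lines in the two hunks above are the producer halves of a bitmask-plus-waitqueue handshake: queuing a flip set the plane's bit in old_fb_obj->pending_flip, and the completion path cleared it before waking dev_priv->pending_flip_queue; the execbuffer code removed earlier read that same mask as one consumer. A self-contained userspace analogue of the handshake using pthreads, purely illustrative (the kernel uses an atomic bitmask and a wait queue, not a mutex and condition variable; all names here are invented):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t flip_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t flip_done = PTHREAD_COND_INITIALIZER;
    static uint32_t pending_flip;   /* bit n set: a flip is queued on plane n */

    /* Queue side (cf. the atomic_add above): mark the plane busy. */
    static void flip_queued(unsigned int plane)
    {
            pthread_mutex_lock(&flip_lock);
            pending_flip |= 1u << plane;
            pthread_mutex_unlock(&flip_lock);
    }

    /* Completion side (cf. atomic_clear_mask + wake_up): release waiters. */
    static void flip_finished(unsigned int plane)
    {
            pthread_mutex_lock(&flip_lock);
            pending_flip &= ~(1u << plane);
            pthread_cond_broadcast(&flip_done);
            pthread_mutex_unlock(&flip_lock);
    }

    /* Waiter side (cf. sleepers on pending_flip_queue): block until clear. */
    static void wait_for_flip(unsigned int plane)
    {
            pthread_mutex_lock(&flip_lock);
            while (pending_flip & (1u << plane))
                    pthread_cond_wait(&flip_done, &flip_lock);
            pthread_mutex_unlock(&flip_lock);
    }

    int main(void)
    {
            flip_queued(0);         /* page flip handed to "hardware" */
            flip_finished(0);       /* completion interrupt fires */
            wait_for_flip(0);       /* returns immediately: bit already clear */
            return 0;
    }

The ordering matters on the completion side: the bit is cleared before the broadcast, so a waiter that wakes up re-checks the mask and cannot go back to sleep on a flip that has already finished.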