}
 }
 
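+/*
+ * i915_semaphore_is_enabled - decide whether inter-ring GPU semaphores
+ * should be used.  A non-negative i915_semaphores module parameter
+ * overrides the per-generation default; on SNB (gen6) the default
+ * depends on whether IOMMU remapping is active.
+ */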
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+       if (INTEL_INFO(dev)->gen < 6)
+               return false;
+
+       if (i915_semaphores >= 0)
+               return i915_semaphores;
+
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (INTEL_INFO(dev)->gen == 6)
+               return !intel_iommu_enabled;
+
+       return true;
+}
+
 void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
 #include <linux/i2c-algo-bit.h>
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
+#include <linux/intel-iommu.h>
 
 /* General customization:
  */
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+                        struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                                    struct intel_ring_buffer *ring,
                                    u32 seqno);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
 extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 
        return 0;
 }
 
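+/*
+ * i915_gem_object_sync - make subsequent commands on @to wait for any
+ * outstanding rendering of @obj on its current ring.
+ *
+ * Uses a GPU semaphore between the two rings where available, otherwise
+ * falls back to blocking on the CPU until the object's rendering completes.
+ */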
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+                    struct intel_ring_buffer *to)
+{
+       struct intel_ring_buffer *from = obj->ring;
+       u32 seqno;
+       int ret, idx;
+
+       if (from == NULL || to == from)
+               return 0;
+
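+       /* Without semaphores, fall back to waiting on the CPU. */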
+       if (!i915_semaphore_is_enabled(obj->base.dev))
+               return i915_gem_object_wait_rendering(obj);
+
+       idx = intel_ring_sync_index(from, to);
+
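+       /* Nothing to do if we have already synchronised past this seqno. */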
+       seqno = obj->last_rendering_seqno;
+       if (seqno <= from->sync_seqno[idx])
+               return 0;
+
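+       /*
+        * If the seqno belongs to the ring's outstanding lazy request, it
+        * has not been emitted to the hardware yet; add a real request so
+        * there is a seqno in flight for the semaphore to wait upon.
+        */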
+       if (seqno == from->outstanding_lazy_request) {
+               struct drm_i915_gem_request *request;
+
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
+
+               ret = i915_add_request(from, NULL, request);
+               if (ret) {
+                       kfree(request);
+                       return ret;
+               }
+
+               seqno = request->seqno;
+       }
+
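+       /* Note the seqno we synchronised with to avoid redundant waits. */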
+       from->sync_seqno[idx] = seqno;
+
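+       /*
+        * GEM treats a seqno as passed once the current seqno is >= it,
+        * whereas the ring's semaphore wait uses a strictly-greater-than
+        * comparison, hence waiting upon seqno - 1.
+        */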
+       return to->sync_to(to, from, seqno - 1);
+}
+
 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 {
        u32 old_write_domain, old_read_domains;
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
  */
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                return ret;
 
        if (pipelined != obj->ring) {
-               ret = i915_gem_object_wait_rendering(obj);
-               if (ret == -ERESTARTSYS)
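+               /*
+                * With semaphores enabled this queues a GPU-side wait,
+                * keeping the display update pipelined rather than
+                * blocking on the CPU.
+                */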
+               ret = i915_gem_object_sync(obj, pipelined);
+               if (ret)
                        return ret;
        }
 
 
        return 0;
 }
 
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
-       if (INTEL_INFO(dev)->gen < 6)
-               return 0;
-
-       if (i915_semaphores >= 0)
-               return i915_semaphores;
-
-       /* Disable semaphores on SNB */
-       if (INTEL_INFO(dev)->gen == 6)
-               return 0;
-
-       return 1;
-}
-
-static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *to)
-{
-       struct intel_ring_buffer *from = obj->ring;
-       u32 seqno;
-       int ret, idx;
-
-       if (from == NULL || to == from)
-               return 0;
-
-       /* XXX gpu semaphores are implicated in various hard hangs on SNB */
-       if (!intel_enable_semaphores(obj->base.dev))
-               return i915_gem_object_wait_rendering(obj);
-
-       idx = intel_ring_sync_index(from, to);
-
-       seqno = obj->last_rendering_seqno;
-       if (seqno <= from->sync_seqno[idx])
-               return 0;
-
-       if (seqno == from->outstanding_lazy_request) {
-               struct drm_i915_gem_request *request;
-
-               request = kzalloc(sizeof(*request), GFP_KERNEL);
-               if (request == NULL)
-                       return -ENOMEM;
-
-               ret = i915_add_request(from, NULL, request);
-               if (ret) {
-                       kfree(request);
-                       return ret;
-               }
-
-               seqno = request->seqno;
-       }
-
-       from->sync_seqno[idx] = seqno;
-
-       return to->sync_to(to, from, seqno - 1);
-}
-
 static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
        }
 
        list_for_each_entry(obj, objects, exec_list) {
-               ret = i915_gem_execbuffer_sync_rings(obj, ring);
+               ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;
        }