return 0;
 }
 
+static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
+{
+       struct i915_request *rq;
+
+       /*
+        * Completely unscientific finger-in-the-air estimates for suitable
+        * maximum user request size (to avoid blocking) and then backoff.
+        */
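+       /* i.e. a PAGE_SIZE request estimate, backing off to half the ring */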
+       if (intel_ring_update_space(ring) >= PAGE_SIZE)
+               return NULL;
+
+       /*
+        * Find a request such that, after waiting upon it, at least half of
+        * the ring will be available. The hysteresis lets us compete for the
+        * shared ring and should mean that we sleep less often prior to
+        * claiming our resources, but not so long that the ring completely
+        * drains before we can submit our next request.
+        */
+       list_for_each_entry(rq, &ring->request_list, ring_link) {
+               if (__intel_ring_space(rq->postfix,
+                                      ring->emit, ring->size) > ring->size / 2)
+                       break;
+       }
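+       /* If the loop completed, &rq->ring_link is the list head itself */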
+       if (&rq->ring_link == &ring->request_list)
+               return NULL; /* weird, we will check again later for real */
+
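+       /* Caller drops struct_mutex before waiting, so hold a reference */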
+       return i915_request_get(rq);
+}
+
+static int eb_wait_for_ring(const struct i915_execbuffer *eb)
+{
+       const struct intel_context *ce;
+       struct i915_request *rq;
+       int ret = 0;
+
+       /*
+        * Apply a light amount of backpressure to prevent excessive hogs
+        * from blocking on a wait for space whilst holding struct_mutex and
+        * keeping all of their resources pinned.
+        */
+
+       ce = to_intel_context(eb->ctx, eb->engine);
+       if (!ce->ring) /* first use, assume empty! */
+               return 0;
+
+       rq = __eb_wait_for_ring(ce->ring);
+       if (rq) {
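+               /* Drop the lock so others can make progress while we wait */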
+               mutex_unlock(&eb->i915->drm.struct_mutex);
+
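+               /* A signal aborts the wait; -EINTR lets userspace restart */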
+               if (i915_request_wait(rq,
+                                     I915_WAIT_INTERRUPTIBLE,
+                                     MAX_SCHEDULE_TIMEOUT) < 0)
+                       ret = -EINTR;
+
+               i915_request_put(rq);
+
+               mutex_lock(&eb->i915->drm.struct_mutex);
+       }
+
+       return ret;
+}
+
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
        struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
        if (err)
                goto err_rpm;
 
+       err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+       if (unlikely(err))
+               goto err_unlock;
+
        err = eb_relocate(&eb);
        if (err) {
                /*
 err_vma:
        if (eb.exec)
                eb_release_vmas(&eb);
+err_unlock:
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
        intel_runtime_pm_put(eb.i915, wakeref);
 
                I915_GEM_HWS_INDEX_ADDR);
 }
 
-static unsigned int __intel_ring_space(unsigned int head,
-                                      unsigned int tail,
-                                      unsigned int size)
-{
-       /*
-        * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
-        * same cacheline, the Head Pointer must not be greater than the Tail
-        * Pointer."
-        */
-       GEM_BUG_ON(!is_power_of_2(size));
-       return (head - tail - CACHELINE_BYTES) & (size - 1);
-}
-
 unsigned int intel_ring_update_space(struct intel_ring *ring)
 {
        unsigned int space;
 
        return tail;
 }
 
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+       /*
+        * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+        * same cacheline, the Head Pointer must not be greater than the Tail
+        * Pointer."
+        */
+       GEM_BUG_ON(!is_power_of_2(size));
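+       /* The power-of-2 mask handles wraparound of the subtraction */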
+       return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
 void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
 int intel_engine_setup_common(struct intel_engine_cs *engine);