/* Attempt to reap some mmap space from dead objects */
        do {
-               err = i915_gem_wait_for_idle(i915,
-                                            I915_WAIT_INTERRUPTIBLE,
-                                            MAX_SCHEDULE_TIMEOUT);
+               err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
                if (err)
                        break;
 
 
 {
        bool result = !intel_gt_is_wedged(gt);
 
-       if (i915_gem_wait_for_idle(gt->i915,
-                                  I915_WAIT_FOR_IDLE_BOOST,
-                                  I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+       if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                /* XXX hide warning from gem_eio */
                if (i915_modparams.reset) {
                        dev_err(gt->i915->drm.dev,
 
 
        if ((flags & TEST_IDLE) && ret == 0) {
                ret = i915_gem_wait_for_idle(ce->engine->i915,
-                                            0, MAX_SCHEDULE_TIMEOUT);
+                                            MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
 
 
 
 static void wait_for_idle(struct intel_gt *gt)
 {
-       if (i915_gem_wait_for_idle(gt->i915, 0,
-                                  I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+       if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                /*
                 * Forcibly cancel outstanding work and leave
                 * the gpu quiet.
 
                i915_retire_requests(i915);
 
        if (val & (DROP_IDLE | DROP_ACTIVE)) {
-               ret = i915_gem_wait_for_idle(i915,
-                                            I915_WAIT_INTERRUPTIBLE,
-                                            MAX_SCHEDULE_TIMEOUT);
+               ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
        }
 
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          unsigned int flags, long timeout);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
 
        }
 }
 
-static long
-wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
-{
-       struct intel_gt_timelines *timelines = &gt->timelines;
-       struct intel_timeline *tl;
-       unsigned long flags;
-
-       spin_lock_irqsave(&timelines->lock, flags);
-       list_for_each_entry(tl, &timelines->active_list, link) {
-               struct dma_fence *fence;
-
-               fence = i915_active_fence_get(&tl->last_request);
-               if (!fence)
-                       continue;
-
-               spin_unlock_irqrestore(&timelines->lock, flags);
-
-               if (!dma_fence_is_i915(fence)) {
-                       timeout = dma_fence_wait_timeout(fence,
-                                                        flags & I915_WAIT_INTERRUPTIBLE,
-                                                        timeout);
-               } else {
-                       struct i915_request *rq = to_request(fence);
-
-                       /*
-                        * "Race-to-idle".
-                        *
-                        * Switching to the kernel context is often used as
-                        * a synchronous step prior to idling, e.g. in suspend
-                        * for flushing all current operations to memory before
-                        * sleeping. These we want to complete as quickly as
-                        * possible to avoid prolonged stalls, so allow the gpu
-                        * to boost to maximum clocks.
-                        */
-                       if (flags & I915_WAIT_FOR_IDLE_BOOST)
-                               gen6_rps_boost(rq);
-
-                       timeout = i915_request_wait(rq, flags, timeout);
-               }
-
-               dma_fence_put(fence);
-               if (timeout < 0)
-                       return timeout;
-
-               /* restart after reacquiring the lock */
-               spin_lock_irqsave(&timelines->lock, flags);
-               tl = list_entry(&timelines->active_list, typeof(*tl), link);
-       }
-       spin_unlock_irqrestore(&timelines->lock, flags);
-
-       return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
-                          unsigned int flags, long timeout)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
 {
        struct intel_gt *gt = &i915->gt;
 
        if (!intel_gt_pm_is_awake(gt))
                return 0;
 
-       do {
-               timeout = wait_for_timelines(gt, flags, timeout);
-               if (timeout < 0)
-                       return timeout;
-
+       while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
                cond_resched();
                if (signal_pending(current))
                        return -EINTR;
+       }
 
-       } while (i915_retire_requests(i915));
-
-       return 0;
+       return timeout;
 }
 
 struct i915_vma *
 
         * the hopes that we can then remove contexts and the like only
         * bound by their active reference.
         */
-       return i915_gem_wait_for_idle(i915,
-                                     I915_WAIT_INTERRUPTIBLE,
-                                     MAX_SCHEDULE_TIMEOUT);
+       return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 }
 
 static bool
                                    min_size, alignment, color,
                                    start, end, mode);
 
+       i915_retire_requests(vm->i915);
+
 search_again:
        active = NULL;
        INIT_LIST_HEAD(&eviction_list);
 
        trace_i915_gem_evict_node(vm, target, flags);
 
-       /* Retire before we search the active list. Although we have
+       /*
+        * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
         */
-       if (!(flags & PIN_NONBLOCK))
-               i915_retire_requests(vm->i915);
+       i915_retire_requests(vm->i915);
 
        if (i915_vm_has_cache_coloring(vm)) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
 
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+               /* XXX This does not prevent more requests being submitted! */
+               if (i915_retire_requests_timeout(dev_priv,
+                                                -MAX_SCHEDULE_TIMEOUT)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
 
        return timeout;
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915)
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
 {
        struct intel_gt_timelines *timelines = &i915->gt.timelines;
        struct intel_timeline *tl, *tn;
+       unsigned long active_count = 0;
        unsigned long flags;
+       bool interruptible;
        LIST_HEAD(free);
 
+       interruptible = true;
+       if (timeout < 0)
+               timeout = -timeout, interruptible = false;
+
        spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex))
                tl->active_count++; /* pin the list element */
                spin_unlock_irqrestore(&timelines->lock, flags);
 
+               if (timeout > 0) {
+                       struct dma_fence *fence;
+
+                       fence = i915_active_fence_get(&tl->last_request);
+                       if (fence) {
+                               timeout = dma_fence_wait_timeout(fence,
+                                                                interruptible,
+                                                                timeout);
+                               dma_fence_put(fence);
+                       }
+               }
+
                retire_requests(tl);
 
                spin_lock_irqsave(&timelines->lock, flags);
 
                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
-               if (!--tl->active_count)
+               if (--tl->active_count)
+                       active_count += !!rcu_access_pointer(tl->last_request.fence);
+               else
                        list_del(&tl->link);
 
                mutex_unlock(&tl->mutex);
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
 
-       return !list_empty(&timelines->active_list);
+       return active_count ? timeout : 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
 #define I915_WAIT_INTERRUPTIBLE        BIT(0)
 #define I915_WAIT_PRIORITY     BIT(1) /* small priority bump for the request */
 #define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
                                         lockdep_is_held(&rq->engine->active.lock));
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915);
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
+static inline void i915_retire_requests(struct drm_i915_private *i915)
+{
+       i915_retire_requests_timeout(i915, 0);
+}
 
 #endif /* I915_REQUEST_H */
 
 
        cond_resched();
 
-       i915_retire_requests(i915);
-       if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+       if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
                pr_err("%pS timed out, cancelling all further testing.\n",
                       __builtin_return_address(0));
 
                intel_gt_set_wedged(&i915->gt);
                ret = -EIO;
        }
-       i915_retire_requests(i915);
 
        return ret;
 }
 
        t->func = func;
        t->name = name;
 
-       err = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_INTERRUPTIBLE,
-                                    MAX_SCHEDULE_TIMEOUT);
+       err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!",
                       func, name, err);
 
        do {
                for_each_engine(engine, i915, id)
                        mock_engine_flush(engine);
-       } while (i915_retire_requests(i915));
+       } while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
 }
 
 static void mock_device_release(struct drm_device *dev)