return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gem_wait_for_idle(dev_priv, true);
+               ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
                if (ret)
                        goto unlock;
        }
 
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                                       bool interruptible);
+                                       unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
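
The call-site conversion throughout the patch is mechanical: callers that passed true now pass I915_WAIT_INTERRUPTIBLE, and callers that passed false pass 0. A minimal stand-alone sketch of the new convention follows; wait_for_idle() here is an invented stand-in, not the driver's function:

#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define I915_WAIT_INTERRUPTIBLE BIT(0)

/* Stand-in for i915_gem_wait_for_idle(): unlike a bool, a flags word
 * leaves room for additional orthogonal bits later without touching
 * every caller again.
 */
static int wait_for_idle(unsigned int flags)
{
	if (flags & I915_WAIT_INTERRUPTIBLE)
		printf("interruptible wait\n");
	else
		printf("uninterruptible wait\n");
	return 0;
}

int main(void)
{
	wait_for_idle(I915_WAIT_INTERRUPTIBLE); /* was: ..., true  */
	wait_for_idle(0);                       /* was: ..., false */
	return 0;
}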
 
                int ret;
 
                ret = i915_gem_active_wait_unlocked(&active[idx],
-                                                   true, NULL, rps);
+                                                   I915_WAIT_INTERRUPTIBLE,
+                                                   NULL, rps);
                if (ret)
                        return ret;
        }
         * to claim that space for ourselves, we need to take the big
         * struct_mutex to free the requests+objects and allocate our slot.
         */
-       err = i915_gem_wait_for_idle(dev_priv, true);
+       err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (err)
                return err;
 
        active = __I915_BO_ACTIVE(obj);
        for_each_active(active, idx) {
                s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
-               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
+               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+                                                   I915_WAIT_INTERRUPTIBLE,
                                                    timeout, rps);
                if (ret)
                        break;
 }
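
The ioctl path above also shows the timeout convention, which this patch leaves unchanged: a NULL timeout pointer waits forever, while a non-NULL one bounds the wait and receives the remaining time on return. Sketched stand-alone, with an invented wait_request() stub:

#include <stdio.h>

typedef long long s64;

/* Stand-in for i915_wait_request(): consume some of the budget and
 * report what is left through *timeout.
 */
static int wait_request(unsigned int flags, s64 *timeout)
{
	(void)flags;
	if (timeout)
		*timeout -= 100; /* pretend the wait took 100ns */
	return 0;
}

int main(void)
{
	s64 timeout_ns = 1000;
	/* A negative timeout from userspace selects an unbounded wait. */
	s64 *timeout = timeout_ns >= 0 ? &timeout_ns : NULL;

	wait_request(0, timeout);
	printf("remaining: %lld ns\n", timeout_ns); /* prints 900 */
	return 0;
}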
 
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          bool interruptible)
+                          unsigned int flags)
 {
        struct intel_engine_cs *engine;
        int ret;
                if (engine->last_context == NULL)
                        continue;
 
-               ret = intel_engine_idle(engine, interruptible);
+               ret = intel_engine_idle(engine, flags);
                if (ret)
                        return ret;
        }
        if (target == NULL)
                return 0;
 
-       ret = i915_wait_request(target, true, NULL, NULL);
+       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
        i915_gem_request_put(target);
 
        return ret;
        if (ret)
                goto err;
 
-       ret = i915_gem_wait_for_idle(dev_priv, true);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (ret)
                goto err;
 
 
        if (ret)
                return ret;
 
-       ret = i915_gem_wait_for_idle(dev_priv, true);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (ret)
                return ret;
 
                                return ret;
                }
 
-               ret = i915_gem_wait_for_idle(dev_priv, true);
+               ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
                if (ret)
                        return ret;
 
 
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, false)) {
+               if (i915_gem_wait_for_idle(dev_priv, 0)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
 
 
        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine, true);
+               ret = intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE);
                if (ret)
                        return ret;
        }
 /**
  * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: how to wait (I915_WAIT_INTERRUPTIBLE allows the wait to be
+ * interrupted by a signal)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  * @rps: client to charge for RPS boosting
  *
  * errno with remaining time filled in timeout argument.
  */
 int i915_wait_request(struct drm_i915_gem_request *req,
-                     bool interruptible,
+                     unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
 {
-       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
        unsigned long timeout_remain;
 
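The behavioural pivot inside i915_wait_request() is the single expression above: the flag chooses the task state for the sleep, and only an interruptible sleep can be aborted by a signal. Illustrated stand-alone; the TASK_* values below are stand-ins for the kernel's constants:

#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define TASK_INTERRUPTIBLE      1 /* stand-in value */
#define TASK_UNINTERRUPTIBLE    2 /* stand-in value */

int main(void)
{
	unsigned int flags = I915_WAIT_INTERRUPTIBLE;
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;

	/* An interruptible sleep can be aborted by a signal
	 * (-ERESTARTSYS); an uninterruptible one cannot.
	 */
	printf("state = %d\n", state);
	return 0;
}
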
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
 int i915_wait_request(struct drm_i915_gem_request *req,
-                     bool interruptible,
+                     unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
        __attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE BIT(0)
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
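
BIT(0) expands to 1u << 0, so I915_WAIT_INTERRUPTIBLE occupies the low bit of the flags word. This patch defines only that one flag; the second bit below is purely hypothetical, included to show how further flags would compose:

#include <stdio.h>

#define BIT(n)                   (1u << (n))
#define I915_WAIT_INTERRUPTIBLE  BIT(0) /* added by this patch */
#define I915_WAIT_EXAMPLE_LOCKED BIT(1) /* hypothetical future flag */

int main(void)
{
	unsigned int flags = I915_WAIT_INTERRUPTIBLE |
			     I915_WAIT_EXAMPLE_LOCKED;

	/* Each layer tests only the bits it cares about. */
	printf("interruptible? %d\n", !!(flags & I915_WAIT_INTERRUPTIBLE));
	printf("locked?        %d\n", !!(flags & I915_WAIT_EXAMPLE_LOCKED));
	return 0;
}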
 
        if (!request)
                return 0;
 
-       return i915_wait_request(request, true, NULL, NULL);
+       return i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
 }
 
 /**
  * i915_gem_active_wait_unlocked - waits until the request is completed
  * @active - the active request on which to wait
- * @interruptible - whether the wait can be woken by a userspace signal
+ * @flags - how to wait (I915_WAIT_INTERRUPTIBLE allows the wait to be
+ * woken by a userspace signal)
  * @timeout - how long to wait at most
  * @rps - userspace client to charge for a waitboost
  *
  */
 static inline int
 i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
-                             bool interruptible,
+                             unsigned int flags,
                              s64 *timeout,
                              struct intel_rps_client *rps)
 {
 
        request = i915_gem_active_get_unlocked(active);
        if (request) {
-               ret = i915_wait_request(request, interruptible, timeout, rps);
+               ret = i915_wait_request(request, flags, timeout, rps);
                i915_gem_request_put(request);
        }
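
Note how the unlocked wait pins the request with its own reference before sleeping and unpins it afterwards, so the request cannot be freed under the waiter. The pattern in miniature, with a mock refcount rather than the driver's types:

#include <stdio.h>

struct mock_request { int refcount; };

static struct mock_request *request_get(struct mock_request *rq)
{
	if (rq)
		rq->refcount++;
	return rq;
}

static void request_put(struct mock_request *rq)
{
	if (rq && --rq->refcount == 0)
		printf("request freed\n");
}

static int wait_request(struct mock_request *rq, unsigned int flags)
{
	(void)rq; (void)flags;
	return 0; /* pretend the request completed */
}

int main(void)
{
	struct mock_request rq = { .refcount = 1 };
	struct mock_request *active = request_get(&rq); /* pin for the wait */
	int ret = 0;

	if (active) {
		ret = wait_request(active, 0);
		request_put(active); /* unpin; owner's reference remains */
	}
	request_put(&rq); /* owner drops the last reference */
	return ret;
}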
 
        if (!request)
                return 0;
 
-       ret = i915_wait_request(request, true, NULL, NULL);
+       ret = i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
        if (ret)
                return ret;
 
 
        unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
        do {
-               if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
+               if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
                    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
                        break;
 
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(dev_priv, false);
+       ret = i915_gem_wait_for_idle(dev_priv, 0);
        if (ret)
                goto out;
 
 
 
        for_each_active(active, idx)
                i915_gem_active_wait_unlocked(&obj->last_read[idx],
-                                             false, NULL, NULL);
+                                             0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)
 
 
        if (work->flip_queued_req)
                WARN_ON(i915_wait_request(work->flip_queued_req,
-                                         false, NULL,
-                                         NO_WAITBOOST));
+                                         0, NULL, NO_WAITBOOST));
 
        /* For framebuffer backed by dmabuf, wait for fence */
        resv = i915_gem_object_get_dmabuf_resv(obj);
                                continue;
 
                        ret = i915_wait_request(intel_plane_state->wait_req,
-                                               true, NULL, NULL);
+                                               I915_WAIT_INTERRUPTIBLE,
+                                               NULL, NULL);
                        if (ret) {
                                /* Any hang should be swallowed by the wait */
                                WARN_ON(ret == -EIO);
                        continue;
 
                ret = i915_wait_request(intel_plane_state->wait_req,
-                                       true, NULL, NULL);
+                                       0, NULL, NULL);
                /* EIO should be eaten, and we can't get interrupted in the
                 * worker, and blocking commits have waited already. */
                WARN_ON(ret);
 
        if (WARN_ON(&target->ring_link == &ring->request_list))
                return -ENOSPC;
 
-       ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
+       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE,
+                               NULL, NO_WAITBOOST);
        if (ret)
                return ret;
 
 
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
 static inline int intel_engine_idle(struct intel_engine_cs *engine,
-                                   bool interruptible)
+                                   unsigned int flags)
 {
        /* Wait upon the last request to be completed */
        return i915_gem_active_wait_unlocked(&engine->last_request,
-                                            interruptible, NULL, NULL);
+                                            flags, NULL, NULL);
 }
 
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
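
For completeness, intel_engine_idle() remains a thin static inline after the change, adding no policy of its own and simply forwarding the caller's flags to the underlying wait. In miniature, with invented names:

#include <stdio.h>

static int wait_last_request(unsigned int flags)
{
	printf("waiting, flags=%#x\n", flags);
	return 0;
}

/* Thin forwarding wrapper, like intel_engine_idle() above. */
static inline int engine_idle(unsigned int flags)
{
	return wait_last_request(flags);
}

int main(void)
{
	return engine_idle(0);
}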