In the next patch we want to handle reset directly by a locked waiter in
order to avoid issues with returning before the reset is handled. To
handle the reset, we must first know whether we hold the struct_mutex.
If we do not hold the struct_mutex, we cannot perform the reset, but we do
not block the reset worker either (and so we can just continue to wait for
request completion) - otherwise we must relinquish the mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-10-chris@chris-wilson.co.uk
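As a standalone illustration of the rule this patch encodes (not the driver
code itself; the struct and helper below are made up), a locked waiter must
bail out when a reset is pending so the reset handler can take the mutex,
while an unlocked waiter can simply keep waiting for completion:

#include <stdbool.h>
#include <stdio.h>

#define WAIT_INTERRUPTIBLE (1u << 0)
#define WAIT_LOCKED        (1u << 1) /* caller holds the lock; must yield to reset */

struct request {
	int ticks_to_complete; /* simulated completion countdown */
	bool reset_pending;    /* simulated gpu_error wait-queue event */
};

/* Returns 0 on completion, -1 if a locked waiter must back off for reset. */
static int wait_for_request(struct request *rq, unsigned int flags)
{
	while (rq->ticks_to_complete > 0) {
		/*
		 * Only a locked waiter listens for the reset event: it must
		 * drop out so the reset handler can acquire the lock. An
		 * unlocked waiter does not block the reset worker, so it
		 * just continues to wait for request completion.
		 */
		if ((flags & WAIT_LOCKED) && rq->reset_pending)
			return -1;

		rq->ticks_to_complete--; /* stand-in for sleeping on the seqno */
	}
	return 0;
}

int main(void)
{
	struct request a = { .ticks_to_complete = 3, .reset_pending = true };
	struct request b = { .ticks_to_complete = 3, .reset_pending = true };

	printf("locked waiter:   %d\n", wait_for_request(&a, WAIT_INTERRUPTIBLE | WAIT_LOCKED));
	printf("unlocked waiter: %d\n", wait_for_request(&b, WAIT_INTERRUPTIBLE));
	return 0;
}

In the patch itself the same split shows up twice: the waiter only adds
itself to the gpu_error.wait_queue when I915_WAIT_LOCKED is set, and a
lockdep assertion checks that the flag matches whether struct_mutex is
actually held.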
                return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
                if (ret)
                        goto unlock;
        }
 
 
        if (!i915.semaphores) {
                ret = i915_wait_request(from,
-                                       from->i915->mm.interruptible,
+                                       from->i915->mm.interruptible |
+                                       I915_WAIT_LOCKED,
                                        NULL,
                                        NO_WAITBOOST);
                if (ret)


        if (ret)
                goto err;
 
-       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
        if (ret)
                goto err;
 
 
        if (ret)
                return ret;
 
-       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
        if (ret)
                return ret;
 
                                return ret;
                }
 
-               ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
                if (ret)
                        return ret;
 
 
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, 0)) {
+               if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
 
 
        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE);
+               ret = intel_engine_idle(engine,
+                                       I915_WAIT_INTERRUPTIBLE |
+                                       I915_WAIT_LOCKED);
                if (ret)
                        return ret;
        }


        int ret = 0;
 
        might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+       GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+                  !!(flags & I915_WAIT_LOCKED));
+#endif
 
        if (i915_gem_request_completed(req))
                return 0;


                goto complete;
 
        set_current_state(state);
-       add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+       if (flags & I915_WAIT_LOCKED)
+               add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
        intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))


                if (i915_spin_request(req, state, 2))
                        break;
        }
-       remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
        intel_engine_remove_wait(req->engine, &wait);
+       if (flags & I915_WAIT_LOCKED)
+               remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
        __set_current_state(TASK_RUNNING);
+
 complete:
        trace_i915_gem_request_wait_end(req);
 
 
                      s64 *timeout,
                      struct intel_rps_client *rps)
        __attribute__((nonnull(1)));
-#define I915_WAIT_INTERRUPTIBLE BIT(0)
+#define I915_WAIT_INTERRUPTIBLE        BIT(0)
+#define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
        if (!request)
                return 0;
 
-       return i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+       return i915_wait_request(request,
+                                I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                                NULL, NULL);
 }
 
 /**


        if (!request)
                return 0;
 
-       ret = i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+       ret = i915_wait_request(request,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                               NULL, NULL);
        if (ret)
                return ret;
 
 
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(dev_priv, 0);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
        if (ret)
                goto out;
 
 
        if (WARN_ON(&target->ring_link == &ring->request_list))
                return -ENOSPC;
 
-       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE,
+       ret = i915_wait_request(target,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
                                NULL, NO_WAITBOOST);
        if (ret)
                return ret;