www.infradead.org Git - users/dwmw2/linux.git/commitdiff
drm/i915: Make exclusive awaits on i915_active optional
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 6 Apr 2020 15:58:38 +0000 (16:58 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Mon, 6 Apr 2020 18:48:05 +0000 (19:48 +0100)
Later use will require asynchronous waits on the active timelines, but
will not utilize an async wait on the exclusive channel. Make the await
on the exclusive fence explicit in the selection flags.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200406155840.1728-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_vma.c

index 5df7704369fd372b3e71e928f8d5b9212c038250..d5e24be759f79b3ccbab656c90b0db7358738615 100644 (file)
@@ -549,14 +549,15 @@ static int await_active(struct i915_active *ref,
 {
        int err = 0;
 
-       /* We must always wait for the exclusive fence! */
-       if (rcu_access_pointer(ref->excl.fence)) {
+       if (flags & I915_ACTIVE_AWAIT_EXCL &&
+           rcu_access_pointer(ref->excl.fence)) {
                err = __await_active(&ref->excl, fn, arg);
                if (err)
                        return err;
        }
 
-       if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+       if (flags & I915_ACTIVE_AWAIT_ACTIVE &&
+           i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;
 
                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
index b526d310a585f856d6913f0e3e6cfcc372efaa41..ffafaa78c494b8a4c01548b55a350339afb4800b 100644 (file)
@@ -193,7 +193,8 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
 int i915_request_await_active(struct i915_request *rq,
                              struct i915_active *ref,
                              unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
index 2f78b147bb2de7ae92c2d817870f3b9dc89f520c..5cde3e4e7be600b32a7970f69aa5ce5c107cb4b6 100644 (file)
@@ -1948,7 +1948,7 @@ emit_oa_config(struct i915_perf_stream *stream,
        if (!IS_ERR_OR_NULL(active)) {
                /* After all individual context modifications */
                err = i915_request_await_active(rq, active,
-                                               I915_ACTIVE_AWAIT_ALL);
+                                               I915_ACTIVE_AWAIT_ACTIVE);
                if (err)
                        goto err_add_request;
 
index 6cc2d9c4401543f9fc57b5651e0602df85ea42c1..f0383a68c9815f0b91ac20aac048df79b2ab79db 100644 (file)
@@ -1167,7 +1167,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
        /* Wait for the vma to be bound before we start! */
-       err = i915_request_await_active(rq, &vma->active, 0);
+       err = i915_request_await_active(rq, &vma->active,
+                                       I915_ACTIVE_AWAIT_EXCL);
        if (err)
                return err;