www.infradead.org Git - users/dwmw2/linux.git/commitdiff
drm/i915: Serialize against vma moves
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 19 Aug 2019 11:20:33 +0000 (12:20 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 19 Aug 2019 14:25:56 +0000 (15:25 +0100)
Make sure that when submitting requests, we always serialize against
potential vma moves and clflushes.

Time for a i915_request_await_vma() interface!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819112033.30638-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/intel_renderstate.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/selftests/i915_request.c

index 77a201bb3422ff7def63a196524db8a6fdc38bb7..577bd7c72e651cc9cb09895831a5f4dd5eb5e598 100644 (file)
@@ -1192,8 +1192,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto skip_request;
 
        i915_vma_lock(batch);
-       GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-       err = i915_vma_move_to_active(batch, rq, 0);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;
index 784585afac255b559175522b552b7db28f61af84..6415f9a17e2d569922ab4036d6ce6f6f34fa702a 100644 (file)
@@ -106,7 +106,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
        int err;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
+       err = i915_request_await_object(rq, vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, 0);
        i915_vma_unlock(vma);
        if (unlikely(err))
                return err;
@@ -171,7 +173,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
        }
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (unlikely(err))
                goto out_request;
index a1a4b53cdc4a08148e6c09d84bac5eedeaf8852f..0ff7a89aadca1de7ab0287d298c5feeba5ac325b 100644 (file)
@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
        intel_ring_advance(rq, cs);
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        i915_vma_unpin(vma);
 
index dd87e6cd612e6257733898e5f515dd88f8489bda..3e6f4a65d35604ea34dc9f31a92f26197952dc87 100644 (file)
@@ -666,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
                goto err_request;
 
        i915_vma_lock(batch);
-       err = i915_vma_move_to_active(batch, rq, 0);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
@@ -1218,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
                goto err_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
+       err = i915_request_await_object(rq, vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, 0);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
@@ -1315,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                goto err_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
index 50aa7e95124ddd5d0e1840505ac3bd1c7b763742..1d27babff0cee6f7d60a36ebf25a9bfd9f93c593 100644 (file)
@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
                }
 
                i915_vma_lock(vma);
-               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+               err = i915_request_await_object(rq, vma->obj, true);
+               if (err == 0)
+                       err = i915_vma_move_to_active(vma, rq,
+                                                     EXEC_OBJECT_WRITE);
                i915_vma_unlock(vma);
 
                i915_request_add(rq);
index 42e1e9c58f63ed461e55cfcda938729aa50d3bcb..57ece53c1075abd362d5d00a196ed20af3b5b727 100644 (file)
@@ -139,13 +139,17 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
                goto err_request;
 
        i915_vma_lock(batch);
-       err = i915_vma_move_to_active(batch, rq, 0);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
index be37d4501c675c41188e3ac172df6f3809852024..6d05f9c64178fee70355bc8be4c528dd6440a38d 100644 (file)
@@ -222,7 +222,9 @@ int intel_renderstate_emit(struct i915_request *rq)
        }
 
        i915_vma_lock(so.vma);
-       err = i915_vma_move_to_active(so.vma, rq, 0);
+       err = i915_request_await_object(rq, so.vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(so.vma, rq, 0);
        i915_vma_unlock(so.vma);
 err_unpin:
        i915_vma_unpin(so.vma);
index 4484b4447db131a6bf8ae750c41e4ebe0311666b..298c4d191439f86767f3bee76b8a2d9f95abcd58 100644 (file)
@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma,
        int err;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, flags);
+       err = i915_request_await_object(rq, vma->obj,
+                                       flags & EXEC_OBJECT_WRITE);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, flags);
        i915_vma_unlock(vma);
 
        return err;
@@ -1237,7 +1240,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        }
 
        i915_vma_lock(arg.vma);
-       err = i915_vma_move_to_active(arg.vma, rq, flags);
+       err = i915_request_await_object(rq, arg.vma->obj,
+                                       flags & EXEC_OBJECT_WRITE);
+       if (err == 0)
+               err = i915_vma_move_to_active(arg.vma, rq, flags);
        i915_vma_unlock(arg.vma);
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
index b797be1627e9815b9c9cfb1190face28e508075a..d791158988d6307245b34f63a4419896d7a9e40e 100644 (file)
@@ -1459,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
 
        if (vma) {
                i915_vma_lock(vma);
-               err = rq->engine->emit_bb_start(rq,
-                                               vma->node.start,
-                                               PAGE_SIZE, 0);
+               err = i915_request_await_object(rq, vma->obj, false);
                if (!err)
                        err = i915_vma_move_to_active(vma, rq, 0);
+               if (!err)
+                       err = rq->engine->emit_bb_start(rq,
+                                                       vma->node.start,
+                                                       PAGE_SIZE, 0);
                i915_vma_unlock(vma);
        }
 
index eb5da01d0a08297c18cc9f0aa71c2e1c46c80b33..d06d68ac2a3b2c303f35d21787dc0467ac16f925 100644 (file)
@@ -113,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        }
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;
index 3937d43c2961f745f5655115c5b59dff0a8d874b..170e4afa9ccb392273a4437395ea8a5e022b88cf 100644 (file)
@@ -876,7 +876,9 @@ static int live_all_engines(void *arg)
                request[id]->batch = batch;
 
                i915_vma_lock(batch);
-               err = i915_vma_move_to_active(batch, request[id], 0);
+               err = i915_request_await_object(request[id], batch->obj, 0);
+               if (err == 0)
+                       err = i915_vma_move_to_active(batch, request[id], 0);
                i915_vma_unlock(batch);
                GEM_BUG_ON(err);