www.infradead.org Git - nvme.git/commitdiff
drm/xe: Make TLB invalidation fences unordered
authorThomas Hellström <thomas.hellstrom@linux.intel.com>
Wed, 27 Mar 2024 09:11:35 +0000 (10:11 +0100)
committerLucas De Marchi <lucas.demarchi@intel.com>
Thu, 4 Apr 2024 13:32:28 +0000 (08:32 -0500)
They can actually complete out-of-order, so allocate a unique
fence context for each fence.

Fixes: 5387e865d90e ("drm/xe: Add TLB invalidation fence after rebinds issued from execs")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240327091136.3271-4-thomas.hellstrom@linux.intel.com
(cherry picked from commit 0453f1757501df2e82b66b3183a24bba5a6f8fa3)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
drivers/gpu/drm/xe/xe_gt_types.h
drivers/gpu/drm/xe/xe_pt.c

index f03e077f81a04fcb9344f8c634856acab516c6f1..e598a4363d0190504d9ca8d826d7d996f0d2dfaf 100644 (file)
@@ -61,7 +61,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
        INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
        spin_lock_init(&gt->tlb_invalidation.pending_lock);
        spin_lock_init(&gt->tlb_invalidation.lock);
-       gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
        INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
                          xe_gt_tlb_fence_timeout);
 
index 70c615dd14986599324a2fb68f766889761c7eb1..07b2f724ec45685feaa4b5ab86b6f2011f65198e 100644 (file)
@@ -177,13 +177,6 @@ struct xe_gt {
                 * xe_gt_tlb_fence_timeout after the timeut interval is over.
                 */
                struct delayed_work fence_tdr;
-               /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
-               u64 fence_context;
-               /**
-                * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
-                * tlb_invalidation.lock
-                */
-               u32 fence_seqno;
                /** @tlb_invalidation.lock: protects TLB invalidation fences */
                spinlock_t lock;
        } tlb_invalidation;
index 5fc4ad1e42981a6cbe510fcfbbecd9786c718e49..29d1a31f98041422eab107aa74567d19e77d9b97 100644 (file)
@@ -1135,8 +1135,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
        spin_lock_irq(&gt->tlb_invalidation.lock);
        dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
                       &gt->tlb_invalidation.lock,
-                      gt->tlb_invalidation.fence_context,
-                      ++gt->tlb_invalidation.fence_seqno);
+                      dma_fence_context_alloc(1), 1);
        spin_unlock_irq(&gt->tlb_invalidation.lock);
 
        INIT_LIST_HEAD(&ifence->base.link);