www.infradead.org Git - users/hch/configfs.git/commitdiff
drm/xe: Drop xe_gt_tlb_invalidation_wait
author: Matthew Brost <matthew.brost@intel.com>
Fri, 19 Jul 2024 17:29:03 +0000 (10:29 -0700)
committer: Matthew Brost <matthew.brost@intel.com>
Sat, 20 Jul 2024 02:45:31 +0000 (19:45 -0700)
Having two methods to wait on GT TLB invalidations is not ideal. Remove
xe_gt_tlb_invalidation_wait and only use GT TLB invalidation fences.

In addition to two methods being less than ideal, once GT TLB
invalidations are coalesced the seqno cannot be assigned during
xe_gt_tlb_invalidation_ggtt/range. Thus xe_gt_tlb_invalidation_wait
would not have a seqno to wait on. A fence, however, can be armed and
later signaled.

v3:
 - Add explanation about coalescing to commit message
v4:
 - Don't put dma fence if defined on stack (CI)
v5:
 - Initialize ret to zero (CI)
v6:
 - Use invalidation_fence_signal helper in tlb timeout (Matthew Auld)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-3-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_vm.c

index 92a18a0e4acd19172b5170c7aaa4c067c1f714a7..c3419d4412ce85340b6deb15abd5425c00a81873 100644 (file)
@@ -17,6 +17,8 @@
 #include "xe_trace.h"
 #include "regs/xe_guc_regs.h"
 
+#define FENCE_STACK_BIT                DMA_FENCE_FLAG_USER_BITS
+
 /*
  * TLB inval depends on pending commands in the CT queue and then the real
  * invalidation time. Double up the time to process full CT queue
@@ -33,6 +35,23 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
        return hw_tlb_timeout + 2 * delay;
 }
 
+static void
+__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+       bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+       trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+       dma_fence_signal(&fence->base);
+       if (!stack)
+               dma_fence_put(&fence->base);
+}
+
+static void
+invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+       list_del(&fence->link);
+       __invalidation_fence_signal(xe, fence);
+}
 
 static void xe_gt_tlb_fence_timeout(struct work_struct *work)
 {
@@ -54,10 +73,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
                xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
                          fence->seqno, gt->tlb_invalidation.seqno_recv);
 
-               list_del(&fence->link);
                fence->base.error = -ETIME;
-               dma_fence_signal(&fence->base);
-               dma_fence_put(&fence->base);
+               invalidation_fence_signal(xe, fence);
        }
        if (!list_empty(&gt->tlb_invalidation.pending_fences))
                queue_delayed_work(system_wq,
@@ -87,21 +104,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
        return 0;
 }
 
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
-       trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
-       dma_fence_signal(&fence->base);
-       dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
-       list_del(&fence->link);
-       __invalidation_fence_signal(xe, fence);
-}
-
 /**
  * xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
  * @gt: graphics tile
@@ -111,7 +113,6 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
 void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
 {
        struct xe_gt_tlb_invalidation_fence *fence, *next;
-       struct xe_guc *guc = &gt->uc.guc;
        int pending_seqno;
 
        /*
@@ -134,7 +135,6 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
        else
                pending_seqno = gt->tlb_invalidation.seqno - 1;
        WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
-       wake_up_all(&guc->ct.wq);
 
        list_for_each_entry_safe(fence, next,
                                 &gt->tlb_invalidation.pending_fences, link)
@@ -165,6 +165,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
        int seqno;
        int ret;
 
+       xe_gt_assert(gt, fence);
+
        /*
         * XXX: The seqno algorithm relies on TLB invalidation being processed
         * in order which they currently are, if that changes the algorithm will
@@ -173,10 +175,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 
        mutex_lock(&guc->ct.lock);
        seqno = gt->tlb_invalidation.seqno;
-       if (fence) {
-               fence->seqno = seqno;
-               trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
-       }
+       fence->seqno = seqno;
+       trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
        action[1] = seqno;
        ret = xe_guc_ct_send_locked(&guc->ct, action, len,
                                    G2H_LEN_DW_TLB_INVALIDATE, 1);
@@ -209,7 +209,6 @@ static int send_tlb_invalidation(struct xe_guc *guc,
                        TLB_INVALIDATION_SEQNO_MAX;
                if (!gt->tlb_invalidation.seqno)
                        gt->tlb_invalidation.seqno = 1;
-               ret = seqno;
        }
        mutex_unlock(&guc->ct.lock);
 
@@ -223,14 +222,16 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 /**
  * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
  * @gt: graphics tile
+ * @fence: invalidation fence which will be signal on TLB invalidation
+ * completion
  *
  * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion.
+ * caller can use the invalidation fence to wait for completion.
  *
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: 0 on success, negative error code on error
  */
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
+                                     struct xe_gt_tlb_invalidation_fence *fence)
 {
        u32 action[] = {
                XE_GUC_ACTION_TLB_INVALIDATION,
@@ -238,7 +239,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
                MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
        };
 
-       return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+       return send_tlb_invalidation(&gt->uc.guc, fence, action,
                                     ARRAY_SIZE(action));
 }
 
@@ -257,13 +258,15 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 
        if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
            gt->uc.guc.submission_state.enabled) {
-               int seqno;
+               struct xe_gt_tlb_invalidation_fence fence;
+               int ret;
 
-               seqno = xe_gt_tlb_invalidation_guc(gt);
-               if (seqno <= 0)
-                       return seqno;
+               xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+               ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+               if (ret < 0)
+                       return ret;
 
-               xe_gt_tlb_invalidation_wait(gt, seqno);
+               xe_gt_tlb_invalidation_fence_wait(&fence);
        } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
                if (IS_SRIOV_VF(xe))
                        return 0;
@@ -290,18 +293,16 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
  *
  * @gt: graphics tile
  * @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
+ * completion
  * @start: start address
  * @end: end address
  * @asid: address space id
  *
  * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
  *
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
  */
 int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
                                 struct xe_gt_tlb_invalidation_fence *fence,
@@ -312,11 +313,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
        u32 action[MAX_TLB_INVALIDATION_LEN];
        int len = 0;
 
+       xe_gt_assert(gt, fence);
+
        /* Execlists not supported */
        if (gt_to_xe(gt)->info.force_execlist) {
-               if (fence)
-                       __invalidation_fence_signal(xe, fence);
-
+               __invalidation_fence_signal(xe, fence);
                return 0;
        }
 
@@ -382,12 +383,10 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
  * @vma: VMA to invalidate
  *
  * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
  *
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
  */
 int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
                               struct xe_gt_tlb_invalidation_fence *fence,
@@ -400,43 +399,6 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
                                            xe_vma_vm(vma)->usm.asid);
 }
 
-/**
- * xe_gt_tlb_invalidation_wait - Wait for TLB to complete
- * @gt: graphics tile
- * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
- *
- * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete.
- *
- * Return: 0 on success, -ETIME on TLB invalidation timeout
- */
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
-       struct xe_guc *guc = &gt->uc.guc;
-       int ret;
-
-       /* Execlists not supported */
-       if (gt_to_xe(gt)->info.force_execlist)
-               return 0;
-
-       /*
-        * XXX: See above, this algorithm only works if seqno are always in
-        * order
-        */
-       ret = wait_event_timeout(guc->ct.wq,
-                                tlb_invalidation_seqno_past(gt, seqno),
-                                tlb_timeout_jiffies(gt));
-       if (!ret) {
-               struct drm_printer p = xe_gt_err_printer(gt);
-
-               xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
-                         seqno, gt->tlb_invalidation.seqno_recv);
-               xe_guc_ct_print(&guc->ct, &p, true);
-               return -ETIME;
-       }
-
-       return 0;
-}
-
 /**
  * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
  * @guc: guc
@@ -480,12 +442,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
                return 0;
        }
 
-       /*
-        * wake_up_all() and wait_event_timeout() already have the correct
-        * barriers.
-        */
        WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
-       wake_up_all(&guc->ct.wq);
 
        list_for_each_entry_safe(fence, next,
                                 &gt->tlb_invalidation.pending_fences, link) {
@@ -530,11 +487,13 @@ static const struct dma_fence_ops invalidation_fence_ops = {
  * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
  * @gt: GT
  * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
  *
  * Initialize TLB invalidation fence for use
  */
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
-                                      struct xe_gt_tlb_invalidation_fence *fence)
+                                      struct xe_gt_tlb_invalidation_fence *fence,
+                                      bool stack)
 {
        spin_lock_irq(&gt->tlb_invalidation.lock);
        dma_fence_init(&fence->base, &invalidation_fence_ops,
@@ -542,5 +501,8 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
                       dma_fence_context_alloc(1), 1);
        spin_unlock_irq(&gt->tlb_invalidation.lock);
        INIT_LIST_HEAD(&fence->link);
-       dma_fence_get(&fence->base);
+       if (stack)
+               set_bit(FENCE_STACK_BIT, &fence->base.flags);
+       else
+               dma_fence_get(&fence->base);
 }
index 948f4a2f52144e06c3cfdeef59ef14718592c7ef..f430d5797af701cd500f8b890169ffbeef9403fd 100644 (file)
@@ -23,10 +23,16 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
                                 struct xe_gt_tlb_invalidation_fence *fence,
                                 u64 start, u64 end, u32 asid);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
-                                      struct xe_gt_tlb_invalidation_fence *fence);
+                                      struct xe_gt_tlb_invalidation_fence *fence,
+                                      bool stack);
+
+static inline void
+xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+{
+       dma_fence_wait(&fence->base, false);
+}
 
 #endif /* _XE_GT_TLB_INVALIDATION_ */
index 1caa99b22c73093bd6e2438dcf3b2570b733ab51..c24e869b7eaead6b3ab93fb0825f1d3a3b1cca4e 100644 (file)
@@ -1355,7 +1355,7 @@ static void invalidation_fence_init(struct xe_gt *gt,
 
        trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
 
-       xe_gt_tlb_invalidation_fence_init(gt, &ifence->base);
+       xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
 
        ifence->fence = fence;
        ifence->gt = gt;
index 6adb0ff09d40fcf639a16bfbdfe66fc6f0a8451d..86e9e2f877e51f175bd6f8bbced6cc4643da6b54 100644 (file)
@@ -3180,10 +3180,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 {
        struct xe_device *xe = xe_vma_vm(vma)->xe;
        struct xe_tile *tile;
+       struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
        u32 tile_needs_invalidate = 0;
-       int seqno[XE_MAX_TILES_PER_DEVICE];
        u8 id;
-       int ret;
+       int ret = 0;
 
        xe_assert(xe, !xe_vma_is_null(vma));
        trace_xe_vma_invalidate(vma);
@@ -3208,29 +3208,31 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 
        for_each_tile(tile, xe, id) {
                if (xe_pt_zap_ptes(tile, vma)) {
-                       tile_needs_invalidate |= BIT(id);
                        xe_device_wmb(xe);
+                       xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+                                                         &fence[id], true);
+
                        /*
                         * FIXME: We potentially need to invalidate multiple
                         * GTs within the tile
                         */
-                       seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
-                       if (seqno[id] < 0)
-                               return seqno[id];
-               }
-       }
-
-       for_each_tile(tile, xe, id) {
-               if (tile_needs_invalidate & BIT(id)) {
-                       ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
+                       ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+                                                        &fence[id], vma);
                        if (ret < 0)
-                               return ret;
+                               goto wait;
+
+                       tile_needs_invalidate |= BIT(id);
                }
        }
 
+wait:
+       for_each_tile(tile, xe, id)
+               if (tile_needs_invalidate & BIT(id))
+                       xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
        vma->tile_invalidated = vma->tile_mask;
 
-       return 0;
+       return ret;
 }
 
 struct xe_vm_snapshot {