drm/xe: Create ordered workqueue for GT TLB invalidation jobs
author    Matthew Brost <matthew.brost@intel.com>
          Thu, 24 Jul 2025 19:12:12 +0000 (12:12 -0700)
committer Matthew Brost <matthew.brost@intel.com>
          Fri, 25 Jul 2025 01:25:58 +0000 (18:25 -0700)
It makes no sense to schedule GT TLB invalidation jobs in parallel when
they target the same GT, as they all contend on the same lock. Create an
ordered workqueue for GT TLB invalidation jobs.
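
For context, an ordered workqueue (alloc_ordered_workqueue(), or its
drm-managed wrapper drmm_alloc_ordered_workqueue() used below) executes
at most one work item at a time, in queueing order, so invalidations
targeting the same GT serialize instead of piling up on the shared lock.
Below is a minimal sketch of the idea; the names are hypothetical and
not part of this patch:

	#include <linux/workqueue.h>

	static struct workqueue_struct *inval_wq;	/* hypothetical example wq */

	static void inval_work_func(struct work_struct *work)
	{
		/* One invalidation at a time: no other item queued on
		 * inval_wq can run concurrently, so there is nothing
		 * to contend on.
		 */
	}

	static int inval_example_init(void)
	{
		/* WQ_MEM_RECLAIM matches the flag used by the patch so
		 * the queue can make forward progress under memory
		 * pressure.
		 */
		inval_wq = alloc_ordered_workqueue("example-inval-wq",
						   WQ_MEM_RECLAIM);
		if (!inval_wq)
			return -ENOMEM;
		return 0;
	}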

v3:
 - Fix typo in commit message (Stuart)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Stuart Summers <stuart.summers@intel.com>
Link: https://lore.kernel.org/r/20250724191216.4076566-4-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
drivers/gpu/drm/xe/xe_gt_types.h

index 086c12ee3d9de08e0b4a84f3c2120ab119ea1eab..02f0bb92d6e06fe37a3dca9d8154d2fbe5c3e10f 100644 (file)
@@ -3,6 +3,8 @@
  * Copyright © 2023 Intel Corporation
  */
 
+#include <drm/drm_managed.h>
+
 #include "xe_gt_tlb_invalidation.h"
 
 #include "abi/guc_actions_abi.h"
@@ -123,6 +125,12 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
        INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
                          xe_gt_tlb_fence_timeout);
 
+       gt->tlb_invalidation.job_wq =
+               drmm_alloc_ordered_workqueue(&gt_to_xe(gt)->drm, "gt-tbl-inval-job-wq",
+                                            WQ_MEM_RECLAIM);
+       if (IS_ERR(gt->tlb_invalidation.job_wq))
+               return PTR_ERR(gt->tlb_invalidation.job_wq);
+
        return 0;
 }
 
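Because the workqueue is allocated with drmm_alloc_ordered_workqueue(),
it is destroyed automatically when the DRM device is released, so no
matching destroy_workqueue() call is needed in a fini path. As a hedged
sketch, a caller might queue an invalidation job's work item on the new
per-GT workqueue roughly like this (hypothetical helper, not part of
this diff):

	#include <linux/workqueue.h>

	/* Hypothetical helper: queue a job's work item on the per-GT
	 * ordered workqueue.  Since job_wq is ordered, at most one
	 * invalidation job runs at a time for this GT.
	 */
	static void xe_example_queue_tlb_inval_job(struct xe_gt *gt,
						   struct work_struct *job_work)
	{
		queue_work(gt->tlb_invalidation.job_wq, job_work);
	}
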
index 96344c604726048721ceac03e4057dd7f886a1d2..dfd4a16da5f05b0b3bfe3d97e2b079273a88aca5 100644 (file)
@@ -210,6 +210,8 @@ struct xe_gt {
                 * xe_gt_tlb_fence_timeout after the timeout interval is over.
                 */
                struct delayed_work fence_tdr;
+               /** @tlb_invalidation.job_wq: schedules GT TLB invalidation jobs */
+               struct workqueue_struct *job_wq;
                /** @tlb_invalidation.lock: protects TLB invalidation fences */
                spinlock_t lock;
        } tlb_invalidation;