drm/xe: Split lrc seqno fence creation up
author    Thomas Hellström <thomas.hellstrom@linux.intel.com>
          Mon, 27 May 2024 13:59:09 +0000 (15:59 +0200)
committer Thomas Hellström <thomas.hellstrom@linux.intel.com>
          Mon, 27 May 2024 19:26:02 +0000 (21:26 +0200)
Since sometimes a lock is required to initialize a seqno fence,
and it might be desirable not to hold that lock while performing
memory allocations, split the lrc seqno fence creation up into an
allocation phase and an initialization phase.

Since lrc seqno fences are hw_fences under the hood, do the same
for those and remove the xe_hw_fence_create() function, which is
no longer used.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240527135912.152156-3-thomas.hellstrom@linux.intel.com
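
The split lets a call site perform the allocation before taking whatever lock
protects seqno assignment, and do only the (non-allocating) initialization
under it. A minimal, hypothetical sketch of such a call site follows; the
function name, lock and error handling are assumptions, only the xe_lrc_*()
calls come from this patch:

    /* Hypothetical call site: allocate outside the lock, initialize under it. */
    static int example_arm_seqno_fence(struct xe_lrc *lrc, spinlock_t *lock,
                                       struct dma_fence **out)
    {
            struct dma_fence *fence;

            /* Allocation phase: may allocate memory, so done before locking. */
            fence = xe_lrc_alloc_seqno_fence();
            if (IS_ERR(fence))
                    return PTR_ERR(fence);

            /* Initialization phase: assigns the seqno, no allocation under the lock. */
            spin_lock(lock);
            xe_lrc_init_seqno_fence(lrc, fence);
            spin_unlock(lock);

            *out = fence;
            return 0;
    }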
drivers/gpu/drm/xe/xe_hw_fence.c
drivers/gpu/drm/xe/xe_hw_fence.h
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_lrc.h

diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index f872ef10312725e85c94411bd9b3e2d5d68b2284..35c0063a831af256ed9c7805e621b4bfbb7acc43 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -208,23 +208,58 @@ static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence)
        return container_of(fence, struct xe_hw_fence, dma);
 }
 
-struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
-                                      struct iosys_map seqno_map)
+/**
+ * xe_hw_fence_alloc() -  Allocate an hw fence.
+ *
+ * Allocate but don't initialize an hw fence.
+ *
+ * Return: Pointer to the allocated fence or
+ * negative error pointer on error.
+ */
+struct dma_fence *xe_hw_fence_alloc(void)
 {
-       struct xe_hw_fence *fence;
+       struct xe_hw_fence *hw_fence = fence_alloc();
 
-       fence = fence_alloc();
-       if (!fence)
+       if (!hw_fence)
                return ERR_PTR(-ENOMEM);
 
-       fence->ctx = ctx;
-       fence->seqno_map = seqno_map;
-       INIT_LIST_HEAD(&fence->irq_link);
+       return &hw_fence->dma;
+}
 
-       dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
-                      ctx->dma_fence_ctx, ctx->next_seqno++);
+/**
+ * xe_hw_fence_free() - Free an hw fence.
+ * @fence: Pointer to the fence to free.
+ *
+ * Frees an hw fence that hasn't yet been
+ * initialized.
+ */
+void xe_hw_fence_free(struct dma_fence *fence)
+{
+       fence_free(&fence->rcu);
+}
 
-       trace_xe_hw_fence_create(fence);
+/**
+ * xe_hw_fence_init() - Initialize an hw fence.
+ * @fence: Pointer to the fence to initialize.
+ * @ctx: Pointer to the struct xe_hw_fence_ctx fence context.
+ * @seqno_map: Pointer to the map into where the seqno is blitted.
+ *
+ * Initializes a pre-allocated hw fence.
+ * After initialization, the fence is subject to normal
+ * dma-fence refcounting.
+ */
+void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
+                     struct iosys_map seqno_map)
+{
+       struct  xe_hw_fence *hw_fence =
+               container_of(fence, typeof(*hw_fence), dma);
+
+       hw_fence->ctx = ctx;
+       hw_fence->seqno_map = seqno_map;
+       INIT_LIST_HEAD(&hw_fence->irq_link);
+
+       dma_fence_init(fence, &xe_hw_fence_ops, &ctx->irq->lock,
+                      ctx->dma_fence_ctx, ctx->next_seqno++);
 
-       return fence;
+       trace_xe_hw_fence_create(hw_fence);
 }
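
With the split API, the release path depends on whether xe_hw_fence_init()
has run: an uninitialized fence must go back through xe_hw_fence_free(),
while an initialized one follows normal dma-fence refcounting. A hypothetical
lifecycle sketch, where the bail-out condition is an assumption and only the
xe_hw_fence and dma_fence calls come from the driver:

    /* Hypothetical lifecycle sketch for the split hw fence API. */
    static struct dma_fence *example_hw_fence(struct xe_hw_fence_ctx *ctx,
                                              struct iosys_map seqno_map,
                                              bool bail_out)
    {
            struct dma_fence *fence = xe_hw_fence_alloc();

            if (IS_ERR(fence))
                    return fence;

            if (bail_out) {
                    /* Not yet initialized: release with xe_hw_fence_free();
                     * dma_fence_put() is only valid after initialization.
                     */
                    xe_hw_fence_free(fence);
                    return ERR_PTR(-ECANCELED);
            }

            xe_hw_fence_init(fence, ctx, seqno_map);
            /* From here on, drop references with dma_fence_put(). */
            return fence;
    }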
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.h b/drivers/gpu/drm/xe/xe_hw_fence.h
index cfe5fd603787e73672e7c7ec85472dbe771a39d7..f13a1c4982c7306dfced818c664082f783faf337 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.h
+++ b/drivers/gpu/drm/xe/xe_hw_fence.h
@@ -24,7 +24,10 @@ void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
                          struct xe_hw_fence_irq *irq, const char *name);
 void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx);
 
-struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
-                                      struct iosys_map seqno_map);
+struct dma_fence *xe_hw_fence_alloc(void);
 
+void xe_hw_fence_free(struct dma_fence *fence);
+
+void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
+                     struct iosys_map seqno_map);
 #endif
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index f679cb9aaea7ea7772027c0b714349cdba9b085a..c28796cfaa030e6db541a78e4ad7bdb35f69790d 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1033,10 +1033,54 @@ u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc)
        return __xe_lrc_seqno_ggtt_addr(lrc);
 }
 
+/**
+ * xe_lrc_alloc_seqno_fence() - Allocate an lrc seqno fence.
+ *
+ * Allocate but don't initialize an lrc seqno fence.
+ *
+ * Return: Pointer to the allocated fence or
+ * negative error pointer on error.
+ */
+struct dma_fence *xe_lrc_alloc_seqno_fence(void)
+{
+       return xe_hw_fence_alloc();
+}
+
+/**
+ * xe_lrc_free_seqno_fence() - Free an lrc seqno fence.
+ * @fence: Pointer to the fence to free.
+ *
+ * Frees an lrc seqno fence that hasn't yet been
+ * initialized.
+ */
+void xe_lrc_free_seqno_fence(struct dma_fence *fence)
+{
+       xe_hw_fence_free(fence);
+}
+
+/**
+ * xe_lrc_init_seqno_fence() - Initialize an lrc seqno fence.
+ * @lrc: Pointer to the lrc.
+ * @fence: Pointer to the fence to initialize.
+ *
+ * Initializes a pre-allocated lrc seqno fence.
+ * After initialization, the fence is subject to normal
+ * dma-fence refcounting.
+ */
+void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence)
+{
+       xe_hw_fence_init(fence, &lrc->fence_ctx, __xe_lrc_seqno_map(lrc));
+}
+
 struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc)
 {
-       return &xe_hw_fence_create(&lrc->fence_ctx,
-                                  __xe_lrc_seqno_map(lrc))->dma;
+       struct dma_fence *fence = xe_lrc_alloc_seqno_fence();
+
+       if (IS_ERR(fence))
+               return fence;
+
+       xe_lrc_init_seqno_fence(lrc, fence);
+       return fence;
 }
 
 s32 xe_lrc_seqno(struct xe_lrc *lrc)
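
The same convention applies at the lrc level: a fence obtained from
xe_lrc_alloc_seqno_fence() that never reaches xe_lrc_init_seqno_fence()
should be released with xe_lrc_free_seqno_fence() rather than
dma_fence_put(). A brief hypothetical sketch (the abort condition and error
code are assumptions):

    /* Hypothetical lrc-level error path for a fence that was never initialized. */
    struct dma_fence *fence = xe_lrc_alloc_seqno_fence();

    if (IS_ERR(fence))
            return PTR_ERR(fence);

    if (setup_failed) {                     /* assumed abort condition */
            xe_lrc_free_seqno_fence(fence); /* never initialized, so not dma_fence_put() */
            return -EIO;
    }

    xe_lrc_init_seqno_fence(lrc, fence);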
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index b9da1031083b3dcc1580721bfb6ca9c7287247b4..7a8752fd84840566cad08de9177423d735a0dd67 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -44,6 +44,9 @@ void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val);
 u64 xe_lrc_descriptor(struct xe_lrc *lrc);
 
 u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc);
+struct dma_fence *xe_lrc_alloc_seqno_fence(void);
+void xe_lrc_free_seqno_fence(struct dma_fence *fence);
+void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence);
 struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc);
 s32 xe_lrc_seqno(struct xe_lrc *lrc);