From 282e6f846d8c3fcf36293f68f38d814645c3b852 Mon Sep 17 00:00:00 2001
From: Matthew Brost <matthew.brost@intel.com>
Date: Wed, 3 Jul 2024 21:16:50 -0700
Subject: [PATCH] drm/xe: Update VM trace events

The trace events have changed with the move to a single job per VM bind
IOCTL; update the trace events to align with the old behavior as much
as possible.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-6-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_trace_bo.h | 10 ++++----
 drivers/gpu/drm/xe/xe_vm.c       | 42 ++++++++++++++++++++++++++++++--
 2 files changed, 45 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index f39f09ed3495..9b1a1d4304ae 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc,
 	     TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_fail,
-	     TP_PROTO(struct xe_vma *vma),
-	     TP_ARGS(vma)
-);
-
 DEFINE_EVENT(xe_vma, xe_vma_bind,
 	     TP_PROTO(struct xe_vma *vma),
 	     TP_ARGS(vma)
@@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
 	     TP_ARGS(vm)
 );
 
+DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
+	     TP_PROTO(struct xe_vm *vm),
+	     TP_ARGS(vm)
+);
+
 #endif
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 73cc6b0efcef..5232856cc3fb 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2481,6 +2481,38 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
 	return 0;
 }
 
+static void op_trace(struct xe_vma_op *op)
+{
+	switch (op->base.op) {
+	case DRM_GPUVA_OP_MAP:
+		trace_xe_vma_bind(op->map.vma);
+		break;
+	case DRM_GPUVA_OP_REMAP:
+		trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
+		if (op->remap.prev)
+			trace_xe_vma_bind(op->remap.prev);
+		if (op->remap.next)
+			trace_xe_vma_bind(op->remap.next);
+		break;
+	case DRM_GPUVA_OP_UNMAP:
+		trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
+		break;
+	case DRM_GPUVA_OP_PREFETCH:
+		trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
+		break;
+	default:
+		XE_WARN_ON("NOT POSSIBLE");
+	}
+}
+
+static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
+{
+	struct xe_vma_op *op;
+
+	list_for_each_entry(op, &vops->list, link)
+		op_trace(op);
+}
+
 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
 {
 	struct xe_exec_queue *q = vops->q;
@@ -2524,8 +2556,10 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 	if (number_tiles > 1) {
 		fences = kmalloc_array(number_tiles, sizeof(*fences),
 				       GFP_KERNEL);
-		if (!fences)
-			return ERR_PTR(-ENOMEM);
+		if (!fences) {
+			fence = ERR_PTR(-ENOMEM);
+			goto err_trace;
+		}
 	}
 
 	for_each_tile(tile, vm->xe, id) {
@@ -2539,6 +2573,8 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 		}
 	}
 
+	trace_xe_vm_ops_execute(vops);
+
 	for_each_tile(tile, vm->xe, id) {
 		if (!vops->pt_update_ops[id].num_ops)
 			continue;
@@ -2585,6 +2621,8 @@ err_out:
 	kfree(fences);
 	kfree(cf);
 
+err_trace:
+	trace_xe_vm_ops_fail(vm);
 	return fence;
 }
 
-- 
2.49.0
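
One way to exercise the events touched by this patch at runtime (a sketch,
assuming the events are registered under the "xe" trace system like the
rest of xe_trace_bo.h, and that tracefs is mounted at /sys/kernel/tracing):

  # enable the new failure event and the bind/unbind events it complements
  echo 1 > /sys/kernel/tracing/events/xe/xe_vm_ops_fail/enable
  echo 1 > /sys/kernel/tracing/events/xe/xe_vma_bind/enable
  echo 1 > /sys/kernel/tracing/events/xe/xe_vma_unbind/enable

  # run a workload that issues VM bind IOCTLs, then stream the events
  cat /sys/kernel/tracing/trace_pipe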