drm/xe: Move ufence add to vm_bind_ioctl_ops_fini
author    Matthew Brost <matthew.brost@intel.com>
          Thu, 25 Apr 2024 04:55:11 +0000 (21:55 -0700)
committer Matthew Brost <matthew.brost@intel.com>
          Fri, 26 Apr 2024 19:10:06 +0000 (12:10 -0700)
Rather than adding a ufence to a VMA in the bind function, add the
ufence to all VMAs in the IOCTL that require binds in
vm_bind_ioctl_ops_fini. This helps with the transition to 1 job per
VM bind IOCTL.

v2:
 - Rebase
v3:
 - Fix typo in commit (Oak)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-12-matthew.brost@intel.com
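
[Editor's note] The refcounting contract in the diff below is easy to miss:
find_ufence_get() returns an IOCTL-scope reference, each affected VMA then
takes its own reference via the new __xe_sync_ufence_get() helper, and the
IOCTL-scope reference is dropped at the end of vm_bind_ioctl_ops_fini().
The following stand-alone sketch models only that get/put pattern; the
types and helper names here are simplified, hypothetical stand-ins (a plain
int refcount rather than the driver's kref-based user fence):

	#include <stdio.h>
	#include <stdlib.h>

	struct user_fence {
		int refcount;
	};

	/* stands in for find_ufence_get(): lookup returns one reference */
	static struct user_fence *ufence_lookup(void)
	{
		struct user_fence *uf = calloc(1, sizeof(*uf));

		uf->refcount = 1;
		return uf;
	}

	/* stands in for __xe_sync_ufence_get(): take an extra reference */
	static struct user_fence *ufence_get(struct user_fence *uf)
	{
		uf->refcount++;
		return uf;
	}

	/* stands in for xe_sync_ufence_put(): drop a reference */
	static void ufence_put(struct user_fence *uf)
	{
		if (--uf->refcount == 0)
			free(uf);
	}

	struct vma {
		struct user_fence *ufence;
	};

	/* mirrors vma_add_ufence(): replace the VMA's reference */
	static void vma_add_ufence(struct vma *vma, struct user_fence *uf)
	{
		if (vma->ufence)
			ufence_put(vma->ufence);  /* drop old reference */
		vma->ufence = ufence_get(uf);     /* VMA holds its own */
	}

	int main(void)
	{
		struct vma a = { 0 }, b = { 0 };
		struct user_fence *uf = ufence_lookup(); /* ioctl-scope ref */

		vma_add_ufence(&a, uf);
		vma_add_ufence(&b, uf);
		ufence_put(uf); /* drop ioctl-scope ref; VMAs keep theirs */
		printf("refcount after fini: %d\n", a.ufence->refcount); /* 2 */
		return 0;
	}

This is why each VMA takes its own reference: the fence must outlive the
IOCTL, since it is only put back when the VMA's binding is later replaced
or destroyed.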
drivers/gpu/drm/xe/xe_sync.c
drivers/gpu/drm/xe/xe_sync.h
drivers/gpu/drm/xe/xe_vm.c

index 65f1f16282356d41c2f7c6ff9f4fdcbcd9cfc951..2883d9aca404e658d6bf182bc2489ae3f9fc8db5 100644 (file)
@@ -338,6 +338,21 @@ err_out:
        return ERR_PTR(-ENOMEM);
 }
 
+/**
+ * __xe_sync_ufence_get() - Get a reference to a user fence
+ * @ufence: input user fence
+ *
+ * Take an additional reference on the given user fence
+ *
+ * Return: xe_user_fence pointer with reference held
+ */
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence)
+{
+       user_fence_get(ufence);
+
+       return ufence;
+}
+
 /**
  * xe_sync_ufence_get() - Get user fence from sync
  * @sync: input sync
index 3e03396af2c6fad92c5cf9e8cf1411caf36c6488..006dbf78079365bd209696ce14996e181644e00e 100644 (file)
@@ -37,6 +37,7 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
        return !!sync->ufence;
 }
 
+struct xe_user_fence *__xe_sync_ufence_get(struct xe_user_fence *ufence);
 struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
 void xe_sync_ufence_put(struct xe_user_fence *ufence);
 int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
index 40c1258c3282e704084d92f85b60d33a0fd7c817..dfd31b346021fd423275d9f45bcaf1c57018cebf 100644 (file)
@@ -1798,17 +1798,10 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
 {
        struct dma_fence *fence;
        struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-       struct xe_user_fence *ufence;
 
        xe_vm_assert_held(vm);
        xe_bo_assert_held(bo);
 
-       ufence = find_ufence_get(syncs, num_syncs);
-       if (vma->ufence && ufence)
-               xe_sync_ufence_put(vma->ufence);
-
-       vma->ufence = ufence ?: vma->ufence;
-
        if (immediate) {
                fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
                                       first_op, last_op);
@@ -2817,20 +2810,57 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
        return fence;
 }
 
+static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
+{
+       if (vma->ufence)
+               xe_sync_ufence_put(vma->ufence);
+       vma->ufence = __xe_sync_ufence_get(ufence);
+}
+
+static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
+                         struct xe_user_fence *ufence)
+{
+       switch (op->base.op) {
+       case DRM_GPUVA_OP_MAP:
+               vma_add_ufence(op->map.vma, ufence);
+               break;
+       case DRM_GPUVA_OP_REMAP:
+               if (op->remap.prev)
+                       vma_add_ufence(op->remap.prev, ufence);
+               if (op->remap.next)
+                       vma_add_ufence(op->remap.next, ufence);
+               break;
+       case DRM_GPUVA_OP_UNMAP:
+               break;
+       case DRM_GPUVA_OP_PREFETCH:
+               vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
+               break;
+       default:
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+       }
+}
+
 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
                                   struct dma_fence *fence)
 {
        struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
+       struct xe_user_fence *ufence;
        struct xe_vma_op *op;
        int i;
 
+       ufence = find_ufence_get(vops->syncs, vops->num_syncs);
        list_for_each_entry(op, &vops->list, link) {
+               if (ufence)
+                       op_add_ufence(vm, op, ufence);
+
                if (op->base.op == DRM_GPUVA_OP_UNMAP)
                        xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
                else if (op->base.op == DRM_GPUVA_OP_REMAP)
                        xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
                                       fence);
        }
+       if (ufence)
+               xe_sync_ufence_put(ufence);
        for (i = 0; i < vops->num_syncs; i++)
                xe_sync_entry_signal(vops->syncs + i, fence);
        xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);