www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
drm/xe: Simplify VM bind IOCTL error handling and cleanup
author Matthew Brost <matthew.brost@intel.com>
Thu, 25 Apr 2024 04:55:06 +0000 (21:55 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Fri, 26 Apr 2024 19:10:00 +0000 (12:10 -0700)
Clean up everything in the VM bind IOCTL in a single path for both the
error and non-error cases. Also move VM bind IOCTL cleanup from the ops
(which are also used by non-IOCTL binds) into the VM bind IOCTL itself.
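To illustrate the idea only (this is not the driver code; it is a
standalone userspace C sketch and every name in it is made up), here is
a minimal example of a single shared exit path: the success case falls
through the same cleanup as the error case, and error-only work is
guarded on 'err', the way the IOCTL below guards its unwind on
'err && err != -ENODATA':

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Hypothetical stand-ins for the objects the real IOCTL manages. */
  struct resource {
          int id;
  };

  static struct resource *resource_get(int id)
  {
          struct resource *r = malloc(sizeof(*r));

          if (r)
                  r->id = id;
          return r;
  }

  static void resource_put(struct resource *r)
  {
          free(r);
  }

  static int do_bind(struct resource *r)
  {
          /* Pretend odd ids fail so both exit paths get exercised. */
          return (r->id & 1) ? -ENOSPC : 0;
  }

  static int bind_ioctl(int id)
  {
          struct resource *r, *extra = NULL;
          int err;

          r = resource_get(id);
          if (!r)
                  return -ENOMEM;

          extra = resource_get(id + 1);
          if (!extra) {
                  err = -ENOMEM;
                  goto put_r;
          }

          err = do_bind(r);

          /*
           * No separate success return with its own copy of the puts
           * and frees: both outcomes fall through the cleanup below,
           * and error-only work is guarded on 'err'.
           */
          if (err)
                  fprintf(stderr, "bind %d failed: %d\n", id, err);

          resource_put(extra);
  put_r:
          resource_put(r);
          return err;
  }

  int main(void)
  {
          printf("bind(2) -> %d\n", bind_ioctl(2));
          printf("bind(3) -> %d\n", bind_ioctl(3));
          return 0;
  }

The same idea is what lets the success path of xe_vm_bind_ioctl() in
this patch drop its duplicated puts and kvfree calls and instead fall
through the unwind_ops/free_syncs labels shared with the error path.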

v2:
 - Break ops_execute on error (Oak)

Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-7-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

index be41b3f41529477dc20d9db1dcfe8d1a8c9c2a53..66a27ade77d7b8e6ed18a3ce8e77647a877560b0 100644 (file)
@@ -743,8 +743,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
 }
 
 static struct dma_fence *ops_execute(struct xe_vm *vm,
-                                    struct xe_vma_ops *vops,
-                                    bool cleanup);
+                                    struct xe_vma_ops *vops);
 static void xe_vma_ops_init(struct xe_vma_ops *vops);
 
 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
@@ -777,7 +776,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
                        goto free_ops;
        }
 
-       fence = ops_execute(vm, &vops, false);
+       fence = ops_execute(vm, &vops);
        if (IS_ERR(fence)) {
                err = PTR_ERR(fence);
        } else {
@@ -2449,7 +2448,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
        if (!last_op)
                return 0;
 
-       last_op->ops = ops;
        if (last) {
                last_op->flags |= XE_VMA_OP_LAST;
                last_op->num_syncs = num_syncs;
@@ -2619,25 +2617,6 @@ xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
        return fence;
 }
 
-static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
-{
-       bool last = op->flags & XE_VMA_OP_LAST;
-
-       if (last) {
-               while (op->num_syncs--)
-                       xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
-               kfree(op->syncs);
-               if (op->q)
-                       xe_exec_queue_put(op->q);
-       }
-       if (!list_empty(&op->link))
-               list_del(&op->link);
-       if (op->ops)
-               drm_gpuva_ops_free(&vm->gpuvm, op->ops);
-       if (last)
-               xe_vm_put(vm);
-}
-
 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
                             bool post_commit, bool prev_post_commit,
                             bool next_post_commit)
@@ -2714,8 +2693,6 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
                                         op->flags & XE_VMA_OP_PREV_COMMITTED,
                                         op->flags & XE_VMA_OP_NEXT_COMMITTED);
                }
-
-               drm_gpuva_ops_free(&vm->gpuvm, __ops);
        }
 }
 
@@ -2803,24 +2780,20 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
 }
 
 static struct dma_fence *ops_execute(struct xe_vm *vm,
-                                    struct xe_vma_ops *vops,
-                                    bool cleanup)
+                                    struct xe_vma_ops *vops)
 {
        struct xe_vma_op *op, *next;
        struct dma_fence *fence = NULL;
 
        list_for_each_entry_safe(op, next, &vops->list, link) {
-               if (!IS_ERR(fence)) {
-                       dma_fence_put(fence);
-                       fence = xe_vma_op_execute(vm, op);
-               }
+               dma_fence_put(fence);
+               fence = xe_vma_op_execute(vm, op);
                if (IS_ERR(fence)) {
                        drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
                                 op->base.op, PTR_ERR(fence));
                        fence = ERR_PTR(-ENOSPC);
+                       break;
                }
-               if (cleanup)
-                       xe_vma_op_cleanup(vm, op);
        }
 
        return fence;
@@ -2843,7 +2816,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
                if (err)
                        goto unlock;
 
-               fence = ops_execute(vm, vops, true);
+               fence = ops_execute(vm, vops);
                if (IS_ERR(fence)) {
                        err = PTR_ERR(fence);
                        /* FIXME: Killing VM rather than proper error handling */
@@ -3204,30 +3177,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto unwind_ops;
        }
 
-       xe_vm_get(vm);
-       if (q)
-               xe_exec_queue_get(q);
-
        err = vm_bind_ioctl_ops_execute(vm, &vops);
 
-       up_write(&vm->lock);
-
-       if (q)
-               xe_exec_queue_put(q);
-       xe_vm_put(vm);
-
-       for (i = 0; bos && i < args->num_binds; ++i)
-               xe_bo_put(bos[i]);
-
-       kvfree(bos);
-       kvfree(ops);
-       if (args->num_binds > 1)
-               kvfree(bind_ops);
-
-       return err;
-
 unwind_ops:
-       vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+       if (err && err != -ENODATA)
+               vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+       for (i = args->num_binds - 1; i >= 0; --i)
+               if (ops[i])
+                       drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
 free_syncs:
        if (err == -ENODATA)
                err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
index 466b6c62d1f9b35f5ed16f63a38689e33ae3e829..149ab892967e810a36c20c0a516137e23bb6af75 100644 (file)
@@ -330,11 +330,6 @@ enum xe_vma_op_flags {
 struct xe_vma_op {
        /** @base: GPUVA base operation */
        struct drm_gpuva_op base;
-       /**
-        * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
-        * operations is processed
-        */
-       struct drm_gpuva_ops *ops;
        /** @q: exec queue for this operation */
        struct xe_exec_queue *q;
        /**