static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                   struct drm_gpuva_ops *ops,
                                   struct xe_sync_entry *syncs, u32 num_syncs,
-                                  struct list_head *ops_list, bool last)
+                                  struct xe_vma_ops *vops, bool last)
 {
        struct xe_device *xe = vm->xe;
        struct xe_vma_op *last_op = NULL;
        drm_gpuva_for_each_op(__op, ops) {
                struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
                struct xe_vma *vma;
-               bool first = list_empty(ops_list);
+               bool first = list_empty(&vops->list);
                unsigned int flags = 0;
 
                INIT_LIST_HEAD(&op->link);
-               list_add_tail(&op->link, ops_list);
+               list_add_tail(&op->link, &vops->list);
 
                if (first) {
                        op->flags |= XE_VMA_OP_FIRST;
        }
 
        /* FIXME: Unhandled corner case */
-       XE_WARN_ON(!last_op && last && !list_empty(ops_list));
+       XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
 
        if (!last_op)
                return 0;
 
 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
                                           struct xe_vm *vm,
-                                          struct list_head *ops_list)
+                                          struct xe_vma_ops *vops)
 {
        struct xe_vma_op *op;
        int err;
        if (err)
                return err;
 
-       list_for_each_entry(op, ops_list, link) {
+       list_for_each_entry(op, &vops->list, link) {
                err = op_lock_and_prep(exec, vm, op);
                if (err)
                        return err;
 }
 
 static struct dma_fence *ops_execute(struct xe_vm *vm,
-                                    struct list_head *ops_list,
+                                    struct xe_vma_ops *vops,
                                     bool cleanup)
 {
        struct xe_vma_op *op, *next;
        struct dma_fence *fence = NULL;
 
-       list_for_each_entry_safe(op, next, ops_list, link) {
+       list_for_each_entry_safe(op, next, &vops->list, link) {
                if (!IS_ERR(fence)) {
                        dma_fence_put(fence);
                        fence = xe_vma_op_execute(vm, op);
 }
 
 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
-                                    struct list_head *ops_list)
+                                    struct xe_vma_ops *vops)
 {
        struct drm_exec exec;
        struct dma_fence *fence;
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
                      DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
-               err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, ops_list);
+               err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
                drm_exec_retry_on_contention(&exec);
                if (err)
                        goto unlock;
 
-               fence = ops_execute(vm, ops_list, true);
+               fence = ops_execute(vm, vops, true);
                if (IS_ERR(fence)) {
                        err = PTR_ERR(fence);
                        /* FIXME: Killing VM rather than proper error handling */
        return err;
 }
 
+static void xe_vma_ops_init(struct xe_vma_ops *vops)
+{
+       INIT_LIST_HEAD(&vops->list);
+}
+
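The definition of the new wrapper is outside these hunks; for reference, a minimal sketch of what xe_vma_ops is assumed to contain at this point, based only on the &vops->list accesses above (passing a wrapper instead of a bare list head lets later patches attach more per-bind state without touching these signatures again):

struct xe_vma_ops {
	/* list of struct xe_vma_op, linked through xe_vma_op.link */
	struct list_head list;
};
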
 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
        u32 num_syncs, num_ufence = 0;
        struct xe_sync_entry *syncs = NULL;
        struct drm_xe_vm_bind_op *bind_ops;
-       LIST_HEAD(ops_list);
+       struct xe_vma_ops vops;
        int err;
        int i;
 
                goto free_syncs;
        }
 
+       xe_vma_ops_init(&vops);
        for (i = 0; i < args->num_binds; ++i) {
                u64 range = bind_ops[i].range;
                u64 addr = bind_ops[i].addr;
                }
 
                err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
-                                             &ops_list,
-                                             i == args->num_binds - 1);
+                                             &vops, i == args->num_binds - 1);
                if (err)
                        goto unwind_ops;
        }
 
        /* Nothing to do */
-       if (list_empty(&ops_list)) {
+       if (list_empty(&vops.list)) {
                err = -ENODATA;
                goto unwind_ops;
        }
        if (q)
                xe_exec_queue_get(q);
 
-       err = vm_bind_ioctl_ops_execute(vm, &ops_list);
+       err = vm_bind_ioctl_ops_execute(vm, &vops);
 
        up_write(&vm->lock);
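
Condensed from the xe_vm_bind_ioctl hunks above, the flow through the new wrapper ends up roughly as in the sketch below (sync setup, locking and the unwind path are elided here):

	struct xe_vma_ops vops;

	xe_vma_ops_init(&vops);

	/* one parse call per bind op; each op is appended to vops.list */
	for (i = 0; i < args->num_binds; ++i) {
		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
					      &vops, i == args->num_binds - 1);
		if (err)
			goto unwind_ops;
	}

	/* nothing to do */
	if (list_empty(&vops.list)) {
		err = -ENODATA;
		goto unwind_ops;
	}

	/* lock, prepare and execute everything linked on vops.list */
	err = vm_bind_ioctl_ops_execute(vm, &vops);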