// SPDX-License-Identifier: MIT
 
-#include <drm/drm_exec.h>
-
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"
  */
 
 static int
-nouveau_exec_job_submit(struct nouveau_job *job)
+nouveau_exec_job_submit(struct nouveau_job *job,
+                       struct drm_gpuvm_exec *vme)
 {
        struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
        struct nouveau_cli *cli = job->cli;
        struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
-       struct drm_exec *exec = &job->exec;
-       struct drm_gem_object *obj;
-       unsigned long index;
        int ret;
 
        /* Create a new fence, but do not emit yet. */
                return ret;
 
        nouveau_uvmm_lock(uvmm);
-       drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-                           DRM_EXEC_IGNORE_DUPLICATES);
-       drm_exec_until_all_locked(exec) {
-               struct drm_gpuva *va;
-
-               drm_gpuvm_for_each_va(va, &uvmm->base) {
-                       if (unlikely(va == &uvmm->base.kernel_alloc_node))
-                               continue;
-
-                       ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
-                       drm_exec_retry_on_contention(exec);
-                       if (ret)
-                               goto err_uvmm_unlock;
-               }
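+       /* Lock the VM's common dma-resv and the dma-resv of every GEM
+        * object the VM has mappings for (tracked on GPUVM's extobj list).
+        */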
+       ret = drm_gpuvm_exec_lock(vme);
+       if (ret) {
+               nouveau_uvmm_unlock(uvmm);
+               return ret;
        }
        nouveau_uvmm_unlock(uvmm);
 
-       drm_exec_for_each_locked_object(exec, index, obj) {
-               struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
-               ret = nouveau_bo_validate(nvbo, true, false);
-               if (ret)
-                       goto err_exec_fini;
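+       /* Validate all BOs marked as evicted for this VM through the
+        * GPUVM vm_bo_validate() callback.
+        */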
+       ret = drm_gpuvm_exec_validate(vme);
+       if (ret) {
+               drm_gpuvm_exec_unlock(vme);
+               return ret;
        }
 
        return 0;
-
-err_uvmm_unlock:
-       nouveau_uvmm_unlock(uvmm);
-err_exec_fini:
-       drm_exec_fini(exec);
-       return ret;
-
 }
 
 static void
-nouveau_exec_job_armed_submit(struct nouveau_job *job)
+nouveau_exec_job_armed_submit(struct nouveau_job *job,
+                             struct drm_gpuvm_exec *vme)
 {
-       struct drm_exec *exec = &job->exec;
-       struct drm_gem_object *obj;
-       unsigned long index;
-
-       drm_exec_for_each_locked_object(exec, index, obj)
-               dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
-       drm_exec_fini(exec);
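+       /* Add the job's done fence to the VM's resv and to the resv of
+        * every external GEM object, then drop the locks taken at submit
+        * time.
+        */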
+       drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+                                     job->resv_usage, job->resv_usage);
+       drm_gpuvm_exec_unlock(vme);
 }
 
 static struct dma_fence *
 
 static void
 op_map_prepare_unwind(struct nouveau_uvma *uvma)
 {
+       struct drm_gpuva *va = &uvma->va;
+
        nouveau_uvma_gem_put(uvma);
-       drm_gpuva_remove(&uvma->va);
+       drm_gpuva_remove(va);
        nouveau_uvma_free(uvma);
 }
 
                        break;
                case DRM_GPUVA_OP_REMAP: {
                        struct drm_gpuva_op_remap *r = &op->remap;
+                       struct drm_gpuva *va = r->unmap->va;
 
                        if (r->next)
                                op_map_prepare_unwind(new->next);
                        if (r->prev)
                                op_map_prepare_unwind(new->prev);
 
-                       op_unmap_prepare_unwind(r->unmap->va);
+                       op_unmap_prepare_unwind(va);
                        break;
                }
                case DRM_GPUVA_OP_UNMAP:
                                        goto unwind;
                                }
                        }
+
                        break;
                }
                case DRM_GPUVA_OP_REMAP: {
 }
 
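+/* Lock and validate every GEM object affected by the bind job's VA
+ * operations. Called from within drm_exec_until_all_locked(), hence it
+ * may run multiple times on contention.
+ */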
 static int
-nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
+                  unsigned int num_fences)
+{
+       struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+       struct bind_job_op *op;
+       int ret;
+
+       list_for_each_op(op, &bind_job->ops) {
+               struct drm_gpuva_op *va_op;
+
+               if (!op->ops)
+                       continue;
+
+               drm_gpuva_for_each_op(va_op, op->ops) {
+                       struct drm_gem_object *obj = op_gem_obj(va_op);
+
+                       if (unlikely(!obj))
+                               continue;
+
+                       ret = drm_exec_prepare_obj(exec, obj, num_fences);
+                       if (ret)
+                               return ret;
+
+                       /* Don't validate GEMs backing mappings we're about to
+                        * unmap, it's not worth the effort.
+                        */
+                       if (va_op->op == DRM_GPUVA_OP_UNMAP)
+                               continue;
+
+                       ret = nouveau_bo_validate(nouveau_gem_object(obj),
+                                                 true, false);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
+                            struct drm_gpuvm_exec *vme)
 {
        struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
        struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
        struct nouveau_sched_entity *entity = job->entity;
-       struct drm_exec *exec = &job->exec;
+       struct drm_exec *exec = &vme->exec;
        struct bind_job_op *op;
        int ret;
 
                        dma_resv_unlock(obj->resv);
                        if (IS_ERR(op->vm_bo))
                                return PTR_ERR(op->vm_bo);
+
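+                       /* Make GPUVM track the GEM as external object, so
+                        * drm_gpuvm_exec_lock() picks up its dma-resv; this
+                        * is a no-op if the GEM shares the VM's resv.
+                        */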
+                       drm_gpuvm_bo_extobj_add(op->vm_bo);
                }
 
                ret = bind_validate_op(job, op);
         * unwind all GPU VA space changes on failure.
         */
        nouveau_uvmm_lock(uvmm);
+
        list_for_each_op(op, &bind_job->ops) {
                switch (op->op) {
                case OP_MAP_SPARSE:
                }
        }
 
-       drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-                           DRM_EXEC_IGNORE_DUPLICATES);
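+       /* vme->flags is expected to carry the flags previously hard-coded
+        * here, set up by the common job submission path.
+        */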
+       drm_exec_init(exec, vme->flags);
        drm_exec_until_all_locked(exec) {
-               list_for_each_op(op, &bind_job->ops) {
-                       struct drm_gpuva_op *va_op;
-
-                       if (IS_ERR_OR_NULL(op->ops))
-                               continue;
-
-                       drm_gpuva_for_each_op(va_op, op->ops) {
-                               struct drm_gem_object *obj = op_gem_obj(va_op);
-
-                               if (unlikely(!obj))
-                                       continue;
-
-                               ret = drm_exec_prepare_obj(exec, obj, 1);
-                               drm_exec_retry_on_contention(exec);
-                               if (ret) {
-                                       op = list_last_op(&bind_job->ops);
-                                       goto unwind;
-                               }
-                       }
-               }
-       }
-
-       list_for_each_op(op, &bind_job->ops) {
-               struct drm_gpuva_op *va_op;
-
-               if (IS_ERR_OR_NULL(op->ops))
-                       continue;
-
-               drm_gpuva_for_each_op(va_op, op->ops) {
-                       struct drm_gem_object *obj = op_gem_obj(va_op);
-
-                       if (unlikely(!obj))
-                               continue;
-
-                       /* Don't validate GEMs backing mappings we're about to
-                        * unmap, it's not worth the effort.
-                        */
-                       if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
-                               continue;
-
-                       ret = nouveau_bo_validate(nouveau_gem_object(obj),
-                                                 true, false);
-                       if (ret) {
-                               op = list_last_op(&bind_job->ops);
-                               goto unwind;
-                       }
+               ret = bind_lock_validate(job, exec, vme->num_fences);
+               drm_exec_retry_on_contention(exec);
+               if (ret) {
+                       op = list_last_op(&bind_job->ops);
+                       goto unwind;
                }
        }
 
        }
 
        nouveau_uvmm_unlock(uvmm);
-       drm_exec_fini(exec);
+       drm_gpuvm_exec_unlock(vme);
        return ret;
 }
 
 static void
-nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
+                                  struct drm_gpuvm_exec *vme)
 {
-       struct drm_exec *exec = &job->exec;
-       struct drm_gem_object *obj;
-       unsigned long index;
-
-       drm_exec_for_each_locked_object(exec, index, obj)
-               dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
-       drm_exec_fini(exec);
+       drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+                                     job->resv_usage, job->resv_usage);
+       drm_gpuvm_exec_unlock(vme);
 }
 
 static struct dma_fence *
        kfree(uvmm);
 }
 
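+/* GPUVM calls back into this for each evicted BO during
+ * drm_gpuvm_exec_validate().
+ */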
+static int
+nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+       struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
+       return nouveau_bo_validate(nvbo, true, false);
+}
+
 static const struct drm_gpuvm_ops gpuvm_ops = {
        .vm_free = nouveau_uvmm_free,
+       .vm_bo_validate = nouveau_uvmm_bo_validate,
 };
 
 int