drm/xe: Simplify locking in new_vma
author    Matthew Brost <matthew.brost@intel.com>
          Tue, 18 Jun 2024 00:38:59 +0000 (17:38 -0700)
committer Matthew Brost <matthew.brost@intel.com>
          Thu, 20 Jun 2024 22:33:48 +0000 (15:33 -0700)
Rather than acquiring and dropping the VM / BO dma-resv around
xe_vma_create(), and doing the same again when adding preempt fences or
on an error, hold these locks across the entire new_vma() function.

v2:
 - Rebase (CI)

Cc: Fei Yang <fei.yang@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jagmeet Randhawa <jagmeet.randhawa@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240618003859.3239239-1-matthew.brost@intel.com
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 61d4d95a5377b68bc302d74eaae0387c05e94936..5b166fa03684e24e05e0309976ee2d378af5dff2 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -180,16 +180,14 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
        struct xe_exec_queue *q;
        int err;
 
+       xe_bo_assert_held(bo);
+
        if (!vm->preempt.num_exec_queues)
                return 0;
 
-       err = xe_bo_lock(bo, true);
-       if (err)
-               return err;
-
        err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
        if (err)
-               goto out_unlock;
+               return err;
 
        list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
                if (q->lr.pfence) {
@@ -198,9 +196,7 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
                                           DMA_RESV_USAGE_BOOKKEEP);
                }
 
-out_unlock:
-       xe_bo_unlock(bo);
-       return err;
+       return 0;
 }
 
 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
@@ -2140,7 +2136,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
        struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
        struct drm_exec exec;
        struct xe_vma *vma;
-       int err;
+       int err = 0;
 
        lockdep_assert_held_write(&vm->lock);
 
@@ -2165,23 +2161,22 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
        vma = xe_vma_create(vm, bo, op->gem.offset,
                            op->va.addr, op->va.addr +
                            op->va.range - 1, pat_index, flags);
-       if (bo)
-               drm_exec_fini(&exec);
+       if (IS_ERR(vma))
+               goto err_unlock;
 
-       if (xe_vma_is_userptr(vma)) {
+       if (xe_vma_is_userptr(vma))
                err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
-               if (err) {
-                       prep_vma_destroy(vm, vma, false);
-                       xe_vma_destroy_unlocked(vma);
-                       return ERR_PTR(err);
-               }
-       } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
+       else if (!xe_vma_has_no_bo(vma) && !bo->vm)
                err = add_preempt_fences(vm, bo);
-               if (err) {
-                       prep_vma_destroy(vm, vma, false);
-                       xe_vma_destroy_unlocked(vma);
-                       return ERR_PTR(err);
-               }
+
+err_unlock:
+       if (bo)
+               drm_exec_fini(&exec);
+
+       if (err) {
+               prep_vma_destroy(vm, vma, false);
+               xe_vma_destroy_unlocked(vma);
+               vma = ERR_PTR(err);
        }
 
        return vma;
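
For illustration only, not part of the commit: the pattern adopted here
is "take the lock once, do all fallible work under it, and funnel every
exit through a single unlock point before cleanup". Below is a minimal
self-contained userspace C sketch of that shape. All names in it
(new_object, step_pin_pages, step_add_fences) are hypothetical stand-ins
for xe_vma_create(), xe_vma_userptr_pin_pages(), add_preempt_fences()
and the drm_exec lock/fini calls in the real patch.

/* Sketch of the consolidated locking/error flow: lock once, record any
 * error, unlock at one label, then clean up outside the lock. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static int step_pin_pages(void)  { return 0; }       /* stand-in for xe_vma_userptr_pin_pages() */
static int step_add_fences(void) { return -ENOMEM; } /* stand-in for add_preempt_fences() */

static void *new_object(int userptr)
{
	void *obj;
	int err = 0;

	pthread_mutex_lock(&obj_lock);	/* held across the whole function */

	obj = malloc(64);		/* stand-in for xe_vma_create() */
	if (!obj) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Fallible work done while the lock is still held; a failure just
	 * records err and falls through to the common unlock path. */
	if (userptr)
		err = step_pin_pages();
	else
		err = step_add_fences();

err_unlock:
	pthread_mutex_unlock(&obj_lock); /* single unlock point for all paths */

	if (err) {
		free(obj);		/* cleanup outside the lock, as in the patch */
		obj = NULL;
	}
	return obj;
}

int main(void)
{
	printf("userptr path: %p\n", new_object(1));
	printf("fence path:   %p\n", new_object(0));
	return 0;
}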