From: Matthew Auld
Date: Thu, 28 Aug 2025 14:24:39 +0000 (+0100)
Subject: drm/xe/pt: unify xe_pt_svm_pre_commit with userptr
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=7477c4bd20dc1aba02ef2d955591826da24e2b04;p=users%2Fhch%2Fmisc.git

drm/xe/pt: unify xe_pt_svm_pre_commit with userptr

We now use the same notifier lock for SVM and userptr; with that we can
combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit.

v2: (Matt B)
 - Re-use xe_svm_notifier_lock/unlock for userptr.
 - Combine svm/userptr handling further down into op_check_svm_userptr.
v3:
 - Only hide the ops if we lack DRM_GPUSVM, since we also need them for
   userptr.

Suggested-by: Matthew Brost
Signed-off-by: Matthew Auld
Cc: Himal Prasad Ghimiray
Cc: Thomas Hellström
Reviewed-by: Matthew Brost
Link: https://lore.kernel.org/r/20250828142430.615826-18-matthew.auld@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 382b25798bc3..07ecf4132d41 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1377,6 +1377,7 @@ static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
 					   pt_update_ops, rftree);
 }
 
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
 
 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
@@ -1432,8 +1433,8 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
 	return 0;
 }
 
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
-			    struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+				struct xe_vm_pgtable_update_ops *pt_update)
 {
 	int err = 0;
 
@@ -1455,9 +1456,40 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
 	case DRM_GPUVA_OP_UNMAP:
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
-					pt_update);
+		if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
+			struct xe_svm_range *range = op->map_range.range;
+			unsigned long i;
+
+			xe_assert(vm->xe,
+				  xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+			xa_for_each(&op->prefetch_range.range, i, range) {
+				xe_svm_range_debug(range, "PRE-COMMIT");
+
+				if (!xe_svm_range_pages_valid(range)) {
+					xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+					return -ENODATA;
+				}
+			}
+		} else {
+			err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
+		}
 		break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+	case DRM_GPUVA_OP_DRIVER:
+		if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+			struct xe_svm_range *range = op->map_range.range;
+
+			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+
+			xe_svm_range_debug(range, "PRE-COMMIT");
+
+			if (!xe_svm_range_pages_valid(range)) {
+				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+				return -EAGAIN;
+			}
+		}
+		break;
+#endif
 	default:
 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
 	}
@@ -1465,7 +1497,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
 	return err;
 }
 
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
 	struct xe_vm *vm = pt_update->vops->vm;
 	struct xe_vma_ops *vops = pt_update->vops;
@@ -1478,69 +1510,18 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 	if (err)
 		return err;
 
-	down_read(&vm->svm.gpusvm.notifier_lock);
+	xe_svm_notifier_lock(vm);
 
 	list_for_each_entry(op, &vops->list, link) {
-		err = op_check_userptr(vm, op, pt_update_ops);
+		err = op_check_svm_userptr(vm, op, pt_update_ops);
 		if (err) {
-			up_read(&vm->svm.gpusvm.notifier_lock);
+			xe_svm_notifier_unlock(vm);
 			break;
 		}
 	}
 
 	return err;
 }
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
-	struct xe_vm *vm = pt_update->vops->vm;
-	struct xe_vma_ops *vops = pt_update->vops;
-	struct xe_vma_op *op;
-	unsigned long i;
-	int err;
-
-	err = xe_pt_pre_commit(pt_update);
-	if (err)
-		return err;
-
-	xe_svm_notifier_lock(vm);
-
-	list_for_each_entry(op, &vops->list, link) {
-		struct xe_svm_range *range = NULL;
-
-		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
-			continue;
-
-		if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
-			xe_assert(vm->xe,
-				  xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
-			xa_for_each(&op->prefetch_range.range, i, range) {
-				xe_svm_range_debug(range, "PRE-COMMIT");
-
-				if (!xe_svm_range_pages_valid(range)) {
-					xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-					xe_svm_notifier_unlock(vm);
-					return -ENODATA;
-				}
-			}
-		} else {
-			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
-			xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
-			range = op->map_range.range;
-
-			xe_svm_range_debug(range, "PRE-COMMIT");
-
-			if (!xe_svm_range_pages_valid(range)) {
-				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-				xe_svm_notifier_unlock(vm);
-				return -EAGAIN;
-			}
-		}
-	}
-
-	return 0;
-}
 #endif
 
 struct xe_pt_stage_unbind_walk {
@@ -1844,7 +1825,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
 					  xe_vma_start(vma), xe_vma_end(vma));
 	++pt_update_ops->current_op;
-	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+	pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
 
 	/*
 	 * If rebind, we have to invalidate TLB on !LR vms to invalidate
@@ -1952,7 +1933,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
 	xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
 					 xe_vma_end(vma));
 	++pt_update_ops->current_op;
-	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+	pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
 	pt_update_ops->needs_invalidation = true;
 
 	xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
@@ -2339,20 +2320,14 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
 	.pre_commit = xe_pt_pre_commit,
 };
 
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
-	.populate = xe_vm_populate_pgtable,
-	.clear = xe_migrate_clear_pgtable_callback,
-	.pre_commit = xe_pt_userptr_pre_commit,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
 	.populate = xe_vm_populate_pgtable,
 	.clear = xe_migrate_clear_pgtable_callback,
-	.pre_commit = xe_pt_svm_pre_commit,
+	.pre_commit = xe_pt_svm_userptr_pre_commit,
 };
 #else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
 #endif
 
 static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
@@ -2390,9 +2365,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 	int err = 0, i;
 	struct xe_migrate_pt_update update = {
 		.ops = pt_update_ops->needs_svm_lock ?
-			&svm_migrate_ops :
-			pt_update_ops->needs_userptr_lock ?
-			&userptr_migrate_ops :
+			&svm_userptr_migrate_ops :
 			&migrate_ops,
 		.vops = vops,
 		.tile_id = tile->id,
@@ -2534,8 +2507,6 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 
 	if (pt_update_ops->needs_svm_lock)
 		xe_svm_notifier_unlock(vm);
-	if (pt_update_ops->needs_userptr_lock)
-		up_read(&vm->svm.gpusvm.notifier_lock);
 
 	xe_tlb_inval_job_put(mjob);
 	xe_tlb_inval_job_put(ijob);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 17cdd7c7e9f5..881f01e14db8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -105,8 +105,6 @@ struct xe_vm_pgtable_update_ops {
 	u32 current_op;
 	/** @needs_svm_lock: Needs SVM lock */
 	bool needs_svm_lock;
-	/** @needs_userptr_lock: Needs userptr lock */
-	bool needs_userptr_lock;
 	/** @needs_invalidation: Needs invalidation */
 	bool needs_invalidation;
 	/**
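
For readers following the locking change outside the kernel tree, here is a
minimal user-space C sketch of the pattern the patch ends up with. Every name
in it (mock_vm, mock_op, mock_pre_commit, and so on) is invented for
illustration and is not the xe driver's real API; it only mirrors the idea
that one pre-commit hook takes a single notifier lock, validates each pending
op whether it is SVM-range or userptr backed, and keeps the lock held on
success, so a single needs_svm_lock flag is enough to pick the pt-update ops
table.

/*
 * Illustrative user-space sketch only: all names below are invented for this
 * example and are not the xe driver's real API.  It mirrors the shape of the
 * unified path: one pre-commit hook, one notifier lock, one walk over the
 * pending ops covering both SVM-range and userptr backed mappings.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum mock_op_kind { MOCK_OP_SVM_RANGE, MOCK_OP_USERPTR };

struct mock_op {
	enum mock_op_kind kind;
	bool pages_valid;	/* stand-in for the range/userptr validity checks */
};

struct mock_vm {
	pthread_rwlock_t notifier_lock;	/* one lock now covers both paths */
	struct mock_op *ops;
	int nr_ops;
};

/* Stand-in for op_check_svm_userptr(): one helper checks both op flavours. */
static int mock_op_check(const struct mock_op *op)
{
	if (!op->pages_valid)
		return op->kind == MOCK_OP_SVM_RANGE ? -ENODATA : -EAGAIN;
	return 0;
}

/*
 * Stand-in for xe_pt_svm_userptr_pre_commit(): take the single notifier
 * lock, validate every op, and drop the lock only on failure (the real code
 * keeps it held across the page-table update and releases it in the run
 * path).
 */
static int mock_pre_commit(struct mock_vm *vm)
{
	int err = 0;

	pthread_rwlock_rdlock(&vm->notifier_lock);
	for (int i = 0; i < vm->nr_ops; i++) {
		err = mock_op_check(&vm->ops[i]);
		if (err) {
			pthread_rwlock_unlock(&vm->notifier_lock);
			break;
		}
	}
	return err;
}

int main(void)
{
	struct mock_op ops[] = {
		{ .kind = MOCK_OP_SVM_RANGE, .pages_valid = true },
		{ .kind = MOCK_OP_USERPTR,   .pages_valid = true },
	};
	struct mock_vm vm = { .ops = ops, .nr_ops = 2 };
	int err;

	pthread_rwlock_init(&vm.notifier_lock, NULL);
	err = mock_pre_commit(&vm);
	printf("pre-commit: %d\n", err);
	if (!err)	/* success path still holds the lock, mirroring the run path's unlock */
		pthread_rwlock_unlock(&vm.notifier_lock);
	pthread_rwlock_destroy(&vm.notifier_lock);
	return 0;
}

Build with something like "cc -pthread sketch.c" to try it; the error codes
are only stand-ins echoing the -ENODATA/-EAGAIN retry values visible in
op_check_svm_userptr() above.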