From ba324ffb25999e14b8488d085ae9916d6f4a6bcf Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 16 Apr 2025 16:59:08 -0400
Subject: [PATCH] drm/amdgpu/userq: optimize enforce isolation and s/r

If user queues are disabled for all IPs (in the case of suspend and
resume) or for gfx/compute (in the case of enforce isolation), we can
return early.

Reviewed-by: Prike Liang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index 59488acd89fa..17bf2d568ae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -759,12 +759,16 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
 
 int amdgpu_userq_suspend(struct amdgpu_device *adev)
 {
+	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
 	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
 	int ret = 0;
 
+	if (!ip_mask)
+		return 0;
+
 	mutex_lock(&adev->userq_mutex);
 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
 		cancel_delayed_work_sync(&uqm->resume_work);
@@ -779,12 +783,16 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
 
 int amdgpu_userq_resume(struct amdgpu_device *adev)
 {
+	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
 	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
 	int ret = 0;
 
+	if (!ip_mask)
+		return 0;
+
 	mutex_lock(&adev->userq_mutex);
 	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
@@ -799,12 +807,17 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						  u32 idx)
 {
+	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
 	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
 	int ret = 0;
 
+	/* only need to stop gfx/compute */
+	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+		return 0;
+
 	mutex_lock(&adev->userq_mutex);
 	if (adev->userq_halt_for_enforce_isolation)
 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
@@ -827,12 +840,17 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						   u32 idx)
 {
+	u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
 	const struct amdgpu_userq_funcs *userq_funcs;
 	struct amdgpu_usermode_queue *queue;
 	struct amdgpu_userq_mgr *uqm, *tmp;
 	int queue_id;
 	int ret = 0;
 
+	/* only need to start gfx/compute */
+	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+		return 0;
+
 	mutex_lock(&adev->userq_mutex);
 	if (!adev->userq_halt_for_enforce_isolation)
 		dev_warn(adev->dev, "userq scheduling already started!\n");
-- 
2.50.1
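
Note: the early-return added by this patch is just a bitmask test against
the set of hardware IPs that support user queues. Below is a minimal
standalone C sketch of the same shape; the enum values, the mask-query
helper, and the function body here are illustrative stand-ins, not the
driver's actual definitions (only AMDGPU_HW_IP_GFX, AMDGPU_HW_IP_COMPUTE,
and amdgpu_userqueue_get_supported_ip_mask() appear in the patch itself).

#include <stdio.h>

/* Illustrative IP indices; the real values live in the amdgpu UAPI headers. */
enum hw_ip {
	HW_IP_GFX     = 0,
	HW_IP_COMPUTE = 1,
	HW_IP_SDMA    = 2,
};

/* Hypothetical stand-in for the per-device supported-IP mask query. */
static unsigned int get_supported_ip_mask(void)
{
	return 1u << HW_IP_SDMA;	/* pretend only SDMA user queues exist */
}

static int stop_sched_for_enforce_isolation(void)
{
	unsigned int ip_mask = get_supported_ip_mask();

	/* Enforce isolation only involves gfx/compute queues, so bail out
	 * early when neither IP supports user queues; this skips taking
	 * the mutex and walking the queue managers entirely. */
	if (!(ip_mask & ((1u << HW_IP_GFX) | (1u << HW_IP_COMPUTE))))
		return 0;

	/* ... the real function would unmap each gfx/compute user queue ... */
	return 0;
}

int main(void)
{
	/* With only SDMA in the mask, this takes the early-return path. */
	printf("ret = %d\n", stop_sched_for_enforce_isolation());
	return 0;
}

The same shape covers the suspend/resume paths, where the test degenerates
to "if (!ip_mask)" because queues on every IP are affected; in all four
functions the win is skipping the userq_mutex acquisition and the IDR walk
on hardware with no user queue support.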