From 94976e7e5ede65f9dfad669b1ea7170320f08399 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 10 Apr 2025 13:26:43 -0400 Subject: [PATCH 01/16] drm/amdgpu/userq: add helpers to start/stop scheduling This will be used to stop/start user queue scheduling for example when switching between kernel and user queues when enforce isolation is enabled. v2: use idx v3: only stop compute/gfx queues Reviewed-by: Sunil Khatri Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 86 +++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h | 5 ++ 3 files changed, 84 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3212fd78b012..68410ba1b944 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1249,6 +1249,7 @@ struct amdgpu_device { struct list_head userq_mgr_list; struct mutex userq_mutex; + bool userq_halt_for_enforce_isolation; }; static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 1867520ba258..e944d05685dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -275,6 +275,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args) const struct amdgpu_userq_funcs *uq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_db_info db_info; + bool skip_map_queue; uint64_t index; int qid, r = 0; @@ -348,6 +349,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } + qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL); if (qid < 0) { DRM_ERROR("Failed to allocate a queue id\n"); @@ -358,15 +360,28 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } - r = uq_funcs->map(uq_mgr, queue); - if (r) { - DRM_ERROR("Failed to map Queue\n"); - idr_remove(&uq_mgr->userq_idr, qid); - amdgpu_userq_fence_driver_free(queue); - uq_funcs->mqd_destroy(uq_mgr, queue); - kfree(queue); - goto unlock; + /* don't map the queue if scheduling is halted */ + mutex_lock(&adev->userq_mutex); + if (adev->userq_halt_for_enforce_isolation && + ((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE))) + skip_map_queue = true; + else + skip_map_queue = false; + if (!skip_map_queue) { + r = uq_funcs->map(uq_mgr, queue); + if (r) { + mutex_unlock(&adev->userq_mutex); + DRM_ERROR("Failed to map Queue\n"); + idr_remove(&uq_mgr->userq_idr, qid); + amdgpu_userq_fence_driver_free(queue); + uq_funcs->mqd_destroy(uq_mgr, queue); + kfree(queue); + goto unlock; + } } + mutex_unlock(&adev->userq_mutex); + args->out.queue_id = qid; @@ -733,3 +748,58 @@ int amdgpu_userq_resume(struct amdgpu_device *adev) mutex_unlock(&adev->userq_mutex); return ret; } + +int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx) +{ + const struct amdgpu_userq_funcs *userq_funcs; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0; + + mutex_lock(&adev->userq_mutex); + if (adev->userq_halt_for_enforce_isolation) + dev_warn(adev->dev, "userq scheduling already stopped!\n"); + adev->userq_halt_for_enforce_isolation = true; + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + 
cancel_delayed_work_sync(&uqm->resume_work); + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + (queue->xcp_id == idx)) { + userq_funcs = adev->userq_funcs[queue->queue_type]; + ret |= userq_funcs->unmap(uqm, queue); + } + } + } + mutex_unlock(&adev->userq_mutex); + return ret; +} + +int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx) +{ + const struct amdgpu_userq_funcs *userq_funcs; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0; + + mutex_lock(&adev->userq_mutex); + if (!adev->userq_halt_for_enforce_isolation) + dev_warn(adev->dev, "userq scheduling already started!\n"); + adev->userq_halt_for_enforce_isolation = false; + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + (queue->xcp_id == idx)) { + userq_funcs = adev->userq_funcs[queue->queue_type]; + ret |= userq_funcs->map(uqm, queue); + } + } + } + mutex_unlock(&adev->userq_mutex); + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h index db79141e1c1e..0701f33e6740 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h @@ -115,4 +115,9 @@ uint64_t amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, int amdgpu_userq_suspend(struct amdgpu_device *adev); int amdgpu_userq_resume(struct amdgpu_device *adev); +int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx); +int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx); + #endif -- 2.50.1 From 28fc3172e4204c8cdd8c70226ea02b0ae9930b69 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 21 Feb 2025 15:20:45 -0500 Subject: [PATCH 02/16] drm/amdgpu: rename enforce isolation variables Since they will be used for both KFD and KGD user queues, rename them from kfd to userq. No intended functional change. 
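For reference, the bookkeeping behind these variables (see
amdgpu_gfx_kfd_sch_ctrl() in the diff below) works as follows; this is
an explanatory sketch of the existing logic, not code added by this
patch:

    /* per-partition scheduling bookkeeping, indexed by xcp idx */
    u64  userq_sch_req_count[MAX_XCP]; /* outstanding "stop scheduling" requests */
    bool userq_sch_inactive[MAX_XCP];  /* true while scheduling is stopped */

    /* disable: the first request (count == 0) actually stops scheduling,
     *          then the request count is incremented.
     * enable:  the count is decremented; when it reaches 0, the delayed
     *          enforce_isolation work is scheduled to restart scheduling.
     */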
Acked-by: Sunil Khatri Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 32 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 6 ++-- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e966aefc2b0f..b96e0613ea7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4368,7 +4368,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_sync_create(&adev->isolation[i].active); amdgpu_sync_create(&adev->isolation[i].prev); } - mutex_init(&adev->gfx.kfd_sch_mutex); + mutex_init(&adev->gfx.userq_sch_mutex); mutex_init(&adev->gfx.workload_profile_mutex); mutex_init(&adev->vcn.workload_profile_mutex); mutex_init(&adev->userq_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2c933d436e56..c58d32983c45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1947,39 +1947,40 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, bool enable) { - mutex_lock(&adev->gfx.kfd_sch_mutex); + mutex_lock(&adev->gfx.userq_sch_mutex); if (enable) { /* If the count is already 0, it means there's an imbalance bug somewhere. * Note that the bug may be in a different caller than the one which triggers the * WARN_ON_ONCE. */ - if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) { + if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) { dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n"); goto unlock; } - adev->gfx.kfd_sch_req_count[idx]--; + adev->gfx.userq_sch_req_count[idx]--; - if (adev->gfx.kfd_sch_req_count[idx] == 0 && - adev->gfx.kfd_sch_inactive[idx]) { + if (adev->gfx.userq_sch_req_count[idx] == 0 && + adev->gfx.userq_sch_inactive[idx]) { schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); } } else { - if (adev->gfx.kfd_sch_req_count[idx] == 0) { + if (adev->gfx.userq_sch_req_count[idx] == 0) { cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work); - if (!adev->gfx.kfd_sch_inactive[idx]) { - amdgpu_amdkfd_stop_sched(adev, idx); - adev->gfx.kfd_sch_inactive[idx] = true; + if (!adev->gfx.userq_sch_inactive[idx]) { + if (adev->kfd.init_complete) + amdgpu_amdkfd_stop_sched(adev, idx); + adev->gfx.userq_sch_inactive[idx] = true; } } - adev->gfx.kfd_sch_req_count[idx]++; + adev->gfx.userq_sch_req_count[idx]++; } unlock: - mutex_unlock(&adev->gfx.kfd_sch_mutex); + mutex_unlock(&adev->gfx.userq_sch_mutex); } /** @@ -2024,12 +2025,11 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) msecs_to_jiffies(1)); } else { /* Tell KFD to resume the runqueue */ - if (adev->kfd.init_complete) { - WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]); - WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]); + WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]); + WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]); + if (adev->kfd.init_complete) amdgpu_amdkfd_start_sched(adev, idx); - adev->gfx.kfd_sch_inactive[idx] = false; - } + adev->gfx.userq_sch_inactive[idx] = false; } mutex_unlock(&adev->enforce_isolation_mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 91dd365cb1e6..ed54095e6ad6 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -475,9 +475,9 @@ struct amdgpu_gfx {
 	bool	enable_cleaner_shader;
 	struct amdgpu_isolation_work	enforce_isolation[MAX_XCP];
 	/* Mutex for synchronizing KFD scheduler operations */
-	struct mutex	kfd_sch_mutex;
-	u64	kfd_sch_req_count[MAX_XCP];
-	bool	kfd_sch_inactive[MAX_XCP];
+	struct mutex	userq_sch_mutex;
+	u64	userq_sch_req_count[MAX_XCP];
+	bool	userq_sch_inactive[MAX_XCP];
 	unsigned long	enforce_isolation_jiffies[MAX_XCP];
 	unsigned long	enforce_isolation_time[MAX_XCP];
-- 2.50.1

From 8f23a97907d92e16d34d4a2a9dc793398afa34a6 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 14 Apr 2025 13:11:10 -0400
Subject: [PATCH 03/16] drm/amdgpu/userq: integrate with enforce isolation

Enforce isolation serializes access to the GFX IP. User queues are
isolated in the MES scheduler, but we still need to serialize between
kernel queues and user queues. For enforce isolation, group KGD user
queues with KFD user queues.

v2: split out variable renaming, add config guards
v3: use new function names

Reviewed-by: Sunil Khatri
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c58d32983c45..e1dca45a152b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1970,6 +1970,9 @@ static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
 		if (adev->gfx.userq_sch_req_count[idx] == 0) {
 			cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
 			if (!adev->gfx.userq_sch_inactive[idx]) {
+#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
+				amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+#endif
 				if (adev->kfd.init_complete)
 					amdgpu_amdkfd_stop_sched(adev, idx);
 				adev->gfx.userq_sch_inactive[idx] = true;
@@ -2027,6 +2030,9 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
 		/* Tell KFD to resume the runqueue */
 		WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
 		WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
+		amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+#endif
 		if (adev->kfd.init_complete)
 			amdgpu_amdkfd_start_sched(adev, idx);
 		adev->gfx.userq_sch_inactive[idx] = false;
-- 2.50.1

From 6027cbee190049629d3028789659e0763562e21e Mon Sep 17 00:00:00 2001
From: Wentao Liang
Date: Mon, 14 Apr 2025 11:14:39 +0800
Subject: [PATCH 04/16] drm/amd/display: Add error check for avi and vendor
 infoframe setup function

The function fill_stream_properties_from_drm_display_mode() calls
drm_hdmi_avi_infoframe_from_display_mode() and
drm_hdmi_vendor_infoframe_from_display_mode(), but does not check their
return values. Log an error message to prevent silent failure if either
function fails.
Signed-off-by: Wentao Liang Reviewed-by: Alex Hung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 58e758732338..d3a2567fc5b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6437,6 +6437,7 @@ static void fill_stream_properties_from_drm_display_mode( struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + ssize_t err; if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) aconnector = to_amdgpu_dm_connector(connector); @@ -6483,9 +6484,17 @@ static void fill_stream_properties_from_drm_display_mode( } if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { - drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_err(connector->dev, "Failed to setup avi infoframe: %zd\n", err); timing_out->vic = avi_frame.video_code; - drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_err(connector->dev, "Failed to setup vendor infoframe: %zd\n", err); timing_out->hdmi_vic = hv_frame.vic; } -- 2.50.1 From fced8e7d2ddeba7f41b19e065f8c02a9abf9ac00 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Feb 2025 16:54:27 -0500 Subject: [PATCH 05/16] drm/amdgpu: convert userq UAPI _pad to flags Reuse the _pad field for flags. Reviewed-by: Sunil Khatri Reviewed-by: Jesse.Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 4 ++-- include/uapi/drm/amdgpu_drm.h | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index e944d05685dd..9a7ac85ff01c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -399,7 +399,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data, switch (args->in.op) { case AMDGPU_USERQ_OP_CREATE: - if (args->in._pad) + if (args->in.flags) return -EINVAL; r = amdgpu_userqueue_create(filp, args); if (r) @@ -410,7 +410,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data, if (args->in.ip_type || args->in.doorbell_handle || args->in.doorbell_offset || - args->in._pad || + args->in.flags || args->in.queue_va || args->in.queue_size || args->in.rptr_va || diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index ef97c0d78b8a..1a451907184c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -355,7 +355,10 @@ struct drm_amdgpu_userq_in { * and doorbell_offset in the doorbell bo. */ __u32 doorbell_offset; - __u32 _pad; + /** + * @flags: flags used for queue parameters + */ + __u32 flags; /** * @queue_va: Virtual address of the GPU memory which holds the queue * object. The queue holds the workload packets. 
-- 2.50.1

From 024cc8a71aac8194fc2883782d36cbad6a9fe36b Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Feb 2025 16:55:36 -0500
Subject: [PATCH 06/16] drm/amdgpu/userq: add UAPI for setting queue priority

Allow the user to set a queue priority level:
0 - normal low - most apps (maps to MES AMD_PRIORITY_LEVEL_NORMAL)
1 - low - background jobs (maps to MES AMD_PRIORITY_LEVEL_LOW)
2 - normal high - apps that need relatively high priority
    (maps to MES AMD_PRIORITY_LEVEL_MEDIUM)
3 - high (admin only - for compositors) (maps to MES AMD_PRIORITY_LEVEL_HIGH)

Reviewed-by: Sunil Khatri
Reviewed-by: Jesse.Zhang
Signed-off-by: Alex Deucher
---
 include/uapi/drm/amdgpu_drm.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 1a451907184c..267ed4adcfb9 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -329,6 +329,15 @@ union drm_amdgpu_ctx {
 #define AMDGPU_USERQ_OP_CREATE	1
 #define AMDGPU_USERQ_OP_FREE	2
 
+/* queue priority levels */
+/* low < normal low < normal high < high */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK		0x3
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT		0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW	0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW		1
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH	2
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH		3 /* admin only */
+
 /*
  * This structure is a container to pass input configuration
  * info for all supported userqueue related operations.
-- 2.50.1

From 3d0a402e7cd11496c40e03b15828c054a70f6fbd Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Feb 2025 22:27:47 -0500
Subject: [PATCH 07/16] drm/amdgpu/mes11: add conversion for priority levels

Convert driver priority levels to MES11 priority levels. At the moment
they are the same, but they may not always be.
Reviewed-by: Sunil Khatri
Reviewed-by: Jesse.Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index f7aa45775ead..0a5b7a296f08 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -287,6 +287,23 @@ static int convert_to_mes_queue_type(int queue_type)
 	return -1;
 }
 
+static int convert_to_mes_priority_level(int priority_level)
+{
+	switch (priority_level) {
+	case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+		return AMD_PRIORITY_LEVEL_LOW;
+	case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+	default:
+		return AMD_PRIORITY_LEVEL_NORMAL;
+	case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+		return AMD_PRIORITY_LEVEL_MEDIUM;
+	case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+		return AMD_PRIORITY_LEVEL_HIGH;
+	case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+		return AMD_PRIORITY_LEVEL_REALTIME;
+	}
+}
+
 static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 				  struct mes_add_queue_input *input)
 {
@@ -310,9 +327,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
 	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
 	mes_add_queue_pkt.inprocess_gang_priority =
-		input->inprocess_gang_priority;
+		convert_to_mes_priority_level(input->inprocess_gang_priority);
 	mes_add_queue_pkt.gang_global_priority_level =
-		input->gang_global_priority_level;
+		convert_to_mes_priority_level(input->gang_global_priority_level);
 	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
 	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
-- 2.50.1

From a83be6e4798efbfd3f6ca511806fbaab73a4d7e7 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Feb 2025 22:29:15 -0500
Subject: [PATCH 08/16] drm/amdgpu/mes12: add conversion for priority levels

Convert driver priority levels to MES12 priority levels. At the moment
they are the same, but they may not always be.
Reviewed-by: Sunil Khatri
Reviewed-by: Jesse.Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index b0e042a4cea1..1f7614dccb00 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -274,6 +274,23 @@ static int convert_to_mes_queue_type(int queue_type)
 	return -1;
 }
 
+static int convert_to_mes_priority_level(int priority_level)
+{
+	switch (priority_level) {
+	case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+		return AMD_PRIORITY_LEVEL_LOW;
+	case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+	default:
+		return AMD_PRIORITY_LEVEL_NORMAL;
+	case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+		return AMD_PRIORITY_LEVEL_MEDIUM;
+	case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+		return AMD_PRIORITY_LEVEL_HIGH;
+	case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+		return AMD_PRIORITY_LEVEL_REALTIME;
+	}
+}
+
 static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
 				  struct mes_add_queue_input *input)
 {
@@ -297,9 +314,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
 	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
 	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
 	mes_add_queue_pkt.inprocess_gang_priority =
-		input->inprocess_gang_priority;
+		convert_to_mes_priority_level(input->inprocess_gang_priority);
 	mes_add_queue_pkt.gang_global_priority_level =
-		input->gang_global_priority_level;
+		convert_to_mes_priority_level(input->gang_global_priority_level);
 	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
 	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
-- 2.50.1

From 9546c05628a7782508ce4b779b4b00c2276b9e38 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Feb 2025 22:38:08 -0500
Subject: [PATCH 09/16] drm/amdgpu/userq: add priority to user queue structure

So we can track this when we create user queues.

Reviewed-by: Sunil Khatri
Reviewed-by: Jesse.Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h
index 0701f33e6740..b2da513b3d02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h
@@ -57,6 +57,7 @@ struct amdgpu_usermode_queue {
 	struct amdgpu_userq_fence_driver *fence_drv;
 	struct dma_fence	*last_fence;
 	u32			xcp_id;
+	int			priority;
 };
 
 struct amdgpu_userq_funcs {
-- 2.50.1

From 23a650bb9f248133095a2e4d7ba7bcbdba9798ab Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Feb 2025 22:47:11 -0500
Subject: [PATCH 10/16] drm/amdgpu/userq/mes: handle user queue priority

Handle the queue priority set by the user.
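Taken together with patches 06 and 07, the full mapping chain looks like
this (a summary of the tables already in this series, not additional
code):

    /* UAPI create flag              userq level (patch 10)              firmware level (patch 07)
     * ..._PRIORITY_LOW         -> AMDGPU_MES_PRIORITY_LEVEL_LOW     -> AMD_PRIORITY_LEVEL_LOW
     * ..._PRIORITY_NORMAL_LOW  -> AMDGPU_MES_PRIORITY_LEVEL_NORMAL  -> AMD_PRIORITY_LEVEL_NORMAL
     * ..._PRIORITY_NORMAL_HIGH -> AMDGPU_MES_PRIORITY_LEVEL_MEDIUM  -> AMD_PRIORITY_LEVEL_MEDIUM
     * ..._PRIORITY_HIGH        -> AMDGPU_MES_PRIORITY_LEVEL_HIGH    -> AMD_PRIORITY_LEVEL_HIGH
     */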
Reviewed-by: Sunil Khatri Reviewed-by: Jesse.Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 34f8f63747d5..3b45ab19fc3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -96,6 +96,21 @@ mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr, return 0; } +static int convert_to_mes_priority(int priority) +{ + switch (priority) { + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW: + default: + return AMDGPU_MES_PRIORITY_LEVEL_NORMAL; + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW: + return AMDGPU_MES_PRIORITY_LEVEL_LOW; + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH: + return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM; + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH: + return AMDGPU_MES_PRIORITY_LEVEL_HIGH; + } +} + static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { @@ -121,7 +136,7 @@ static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr, queue_input.process_context_addr = ctx->gpu_addr; queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; - queue_input.gang_global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; + queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority); queue_input.process_id = queue->vm->pasid; queue_input.queue_type = queue->queue_type; -- 2.50.1 From a5c34299d866f099c85567117717c18842e67da8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Feb 2025 22:56:26 -0500 Subject: [PATCH 11/16] drm/amdgpu/userq: enable support for queue priorities Enable users to create queues at different priority levels. The highest level is restricted to drm master. 
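For illustration, userspace would request a priority by packing it into
the new flags field. This is a hypothetical snippet (the libdrm ioctl
wrapper and the remaining queue parameters are assumed/elided), not part
of this patch:

    union drm_amdgpu_userq args = {0};

    args.in.op = AMDGPU_USERQ_OP_CREATE;
    args.in.ip_type = AMDGPU_HW_IP_GFX;
    args.in.flags = (AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH <<
                     AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT) &
                    AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK;
    /* ... fill queue_va/queue_size/rptr_va/wptr_va/doorbell as usual ... */
    ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args);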
Reviewed-by: Sunil Khatri
Reviewed-by: Jesse.Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 26 ++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index 9a7ac85ff01c..149993ec537b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -22,6 +22,7 @@
  *
  */
 
+#include <drm/drm_auth.h>
 #include <drm/drm_exec.h>
 #include <linux/pm_runtime.h>
 
@@ -266,6 +267,21 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
 	return r;
 }
 
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+					int priority)
+{
+	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+		return 0;
+
+	if (capable(CAP_SYS_NICE))
+		return 0;
+
+	if (drm_is_current_master(filp))
+		return 0;
+
+	return -EACCES;
+}
+
 static int
 amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 {
@@ -278,6 +294,9 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	bool skip_map_queue;
 	uint64_t index;
 	int qid, r = 0;
+	int priority =
+		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
 
 	/* Usermode queues are only supported for GFX IP as of now */
 	if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
@@ -287,6 +306,10 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 		return -EINVAL;
 	}
 
+	r = amdgpu_userq_priority_permit(filp, priority);
+	if (r)
+		return r;
+
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
 		dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
@@ -319,6 +342,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	queue->doorbell_handle = args->in.doorbell_handle;
 	queue->queue_type = args->in.ip_type;
 	queue->vm = &fpriv->vm;
+	queue->priority = priority;
 
 	db_info.queue_type = queue->queue_type;
 	db_info.doorbell_handle = queue->doorbell_handle;
@@ -399,7 +423,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
 
 	switch (args->in.op) {
 	case AMDGPU_USERQ_OP_CREATE:
-		if (args->in.flags)
+		if (args->in.flags & ~AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK)
 			return -EINVAL;
 		r = amdgpu_userqueue_create(filp, args);
 		if (r)
-- 2.50.1

From b22659d5d3520c82295981797e75de525365a411 Mon Sep 17 00:00:00 2001
From: "Jesse.zhang@amd.com"
Date: Fri, 11 Apr 2025 14:48:52 +0800
Subject: [PATCH 12/16] drm/amdgpu: switch amdgpu_sdma_reset_engine to use the
 new sdma function pointers

Replace the old callback mechanism with direct calls to the stop/start
functions.

Suggested-by: Alex Deucher
Signed-off-by: Jesse Zhang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 34 +++---------------------
 1 file changed, 4 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index c3c6f03190c8..7139d574c23e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -559,16 +559,10 @@ void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct
  * @adev: Pointer to the AMDGPU device
  * @instance_id: ID of the SDMA engine instance to reset
  *
- * This function performs the following steps:
- * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
- * 2. Resets the specified SDMA engine instance.
- * 3. 
Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state. - * * Returns: 0 on success, or a negative error code on failure. */ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) { - struct sdma_on_reset_funcs *funcs; int ret = 0; struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; struct amdgpu_ring *gfx_ring = &sdma_instance->ring; @@ -590,18 +584,8 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) page_sched_stopped = true; } - /* Invoke all registered pre_reset callbacks */ - list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) { - if (funcs->pre_reset) { - ret = funcs->pre_reset(adev, instance_id); - if (ret) { - dev_err(adev->dev, - "beforeReset callback failed for instance %u: %d\n", - instance_id, ret); - goto exit; - } - } - } + if (sdma_instance->funcs->stop_kernel_queue) + sdma_instance->funcs->stop_kernel_queue(gfx_ring); /* Perform the SDMA reset for the specified instance */ ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id); @@ -610,18 +594,8 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) goto exit; } - /* Invoke all registered post_reset callbacks */ - list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) { - if (funcs->post_reset) { - ret = funcs->post_reset(adev, instance_id); - if (ret) { - dev_err(adev->dev, - "afterReset callback failed for instance %u: %d\n", - instance_id, ret); - goto exit; - } - } - } + if (sdma_instance->funcs->start_kernel_queue) + sdma_instance->funcs->start_kernel_queue(gfx_ring); exit: /* Restart the scheduler's work queue for the GFX and page rings -- 2.50.1 From 5c3e7c49538e2ddad10296a318c225bbb3d37d20 Mon Sep 17 00:00:00 2001 From: "Jesse.zhang@amd.com" Date: Fri, 11 Apr 2025 15:26:18 +0800 Subject: [PATCH 13/16] drm/amdgpu: Implement SDMA soft reset directly for v5.x This patch introduces a new function `amdgpu_sdma_soft_reset` to handle SDMA soft resets directly, rather than relying on the DPM interface. 1. **New `amdgpu_sdma_soft_reset` Function**: - Implements a soft reset for SDMA engines by directly writing to the hardware registers. - Handles SDMA versions 4.x and 5.x separately: - For SDMA 4.x, the existing `amdgpu_dpm_reset_sdma` function is used for backward compatibility. - For SDMA 5.x, the driver directly manipulates the `GRBM_SOFT_RESET` register to reset the specified SDMA instance. 2. **Integration into `amdgpu_sdma_reset_engine`**: - The `amdgpu_sdma_soft_reset` function is called during the SDMA reset process, replacing the previous call to `amdgpu_dpm_reset_sdma`. 
v2: r should default to an error (Alex)

Suggested-by: Alex Deucher
Signed-off-by: Jesse Zhang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 38 +++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 7139d574c23e..f456a5a12bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -26,6 +26,8 @@
 #include "amdgpu_sdma.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
 
 #define AMDGPU_CSA_SDMA_SIZE 64
 /* SDMA CSA reside in the 3rd page of CSA */
@@ -554,6 +556,40 @@ void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct
 	list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
 }
 
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
+{
+	struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+	int r = -EOPNOTSUPP;
+
+	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+	case IP_VERSION(4, 4, 2):
+	case IP_VERSION(4, 4, 4):
+	case IP_VERSION(4, 4, 5):
+		/* For SDMA 4.x, use the existing DPM interface for backward compatibility */
+		r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+		break;
+	case IP_VERSION(5, 0, 0):
+	case IP_VERSION(5, 0, 1):
+	case IP_VERSION(5, 0, 2):
+	case IP_VERSION(5, 0, 5):
+	case IP_VERSION(5, 2, 0):
+	case IP_VERSION(5, 2, 2):
+	case IP_VERSION(5, 2, 4):
+	case IP_VERSION(5, 2, 5):
+	case IP_VERSION(5, 2, 6):
+	case IP_VERSION(5, 2, 3):
+	case IP_VERSION(5, 2, 1):
+	case IP_VERSION(5, 2, 7):
+		if (sdma_instance->funcs->soft_reset_kernel_queue)
+			r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+		break;
+	default:
+		break;
+	}
+
+	return r;
+}
+
 /**
  * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
  * @adev: Pointer to the AMDGPU device
@@ -588,7 +624,7 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
 		sdma_instance->funcs->stop_kernel_queue(gfx_ring);
 
 	/* Perform the SDMA reset for the specified instance */
-	ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+	ret = amdgpu_sdma_soft_reset(adev, instance_id);
 	if (ret) {
 		dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
 		goto exit;
-- 2.50.1

From e56d4bf57fabe009494e9b81ba64e21ebe7d35c2 Mon Sep 17 00:00:00 2001
From: "Jesse.zhang@amd.com"
Date: Mon, 14 Apr 2025 16:05:33 +0800
Subject: [PATCH 14/16] drm/amdgpu: Register the new sdma function pointers
 for sdma_v5_0

Register the stop/start/soft_reset queue functions for SDMA IP
version 5.0.
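With these hooks registered, the core reset path from patches 12 and 13
reduces to the following flow (paraphrased from
amdgpu_sdma_reset_engine(), not new code):

    /* amdgpu_sdma_reset_engine(adev, instance_id), simplified */
    if (sdma_instance->funcs->stop_kernel_queue)
            sdma_instance->funcs->stop_kernel_queue(gfx_ring);   /* sdma_v5_0_stop_queue */

    ret = amdgpu_sdma_soft_reset(adev, instance_id);             /* -> soft_reset_kernel_queue */

    if (sdma_instance->funcs->start_kernel_queue)
            sdma_instance->funcs->start_kernel_queue(gfx_ring);  /* sdma_v5_0_restore_queue */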
Suggested-by: Alex Deucher Signed-off-by: Jesse Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 78 +++++++++++++++++++------- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index e1348b6d9c6a..48ee6dc1e3dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev); +static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring); +static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring); static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107), @@ -1323,6 +1325,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } +static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id) +{ + u32 grbm_soft_reset; + u32 tmp; + + grbm_soft_reset = REG_SET_FIELD(0, + GRBM_SOFT_RESET, SOFT_RESET_SDMA0, + 1); + grbm_soft_reset <<= instance_id; + + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + return 0; +} + +static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = { + .stop_kernel_queue = &sdma_v5_0_stop_queue, + .start_kernel_queue = &sdma_v5_0_restore_queue, + .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine, +}; + static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1365,6 +1397,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) return r; for (i = 0; i < adev->sdma.num_instances; i++) { + adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -1506,8 +1539,16 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) { struct amdgpu_device *adev = ring->adev; - int i, j, r; - u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + u32 inst_id = ring->me; + + return amdgpu_sdma_reset_engine(adev, inst_id); +} + +static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring) +{ + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg; + struct amdgpu_device *adev = ring->adev; + int i, j, r = 0; if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -1562,30 +1603,25 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} - /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ - preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); - preempt = 
REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); - - soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; - - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); - - udelay(50); - - soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); +static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 inst_id = ring->me; + u32 freeze; + int r; + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* unfreeze*/ - freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE)); freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze); - r = sdma_v5_0_gfx_resume_instance(adev, i, true); - -err0: + r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true); amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return r; } -- 2.50.1 From 47454f2dc0bf6774c8dbe7226bcf50ba24a788a8 Mon Sep 17 00:00:00 2001 From: "Jesse.zhang@amd.com" Date: Mon, 14 Apr 2025 16:06:51 +0800 Subject: [PATCH 15/16] drm/amdgpu: Register the new sdma function pointers for sdma_v5_2 Register stop/start/soft_reset queue functions for SDMA IP versions v5.2. Suggested-by: Alex Deucher Signed-off-by: Jesse Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 94 +++++++++++++++----------- 1 file changed, 56 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 964f12afac9e..fc95c2875ba5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev); +static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring); +static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring); static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) { @@ -759,37 +761,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) +static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id) { - struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset; u32 tmp; - int i; - for (i = 0; i < adev->sdma.num_instances; i++) { - grbm_soft_reset = REG_SET_FIELD(0, - GRBM_SOFT_RESET, SOFT_RESET_SDMA0, - 1); - grbm_soft_reset <<= i; + grbm_soft_reset = REG_SET_FIELD(0, + GRBM_SOFT_RESET, SOFT_RESET_SDMA0, + 1); + grbm_soft_reset <<= instance_id; - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= 
~grbm_soft_reset; - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + return 0; +} +static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma_v5_2_soft_reset_engine(adev, i); udelay(50); } return 0; } +static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = { + .stop_kernel_queue = &sdma_v5_2_stop_queue, + .start_kernel_queue = &sdma_v5_2_restore_queue, + .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine, +}; + /** * sdma_v5_2_start - setup and start the async dma engines * @@ -1302,6 +1316,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) } for (i = 0; i < adev->sdma.num_instances; i++) { + adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -1437,8 +1452,16 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) { struct amdgpu_device *adev = ring->adev; - int i, j, r; - u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + u32 inst_id = ring->me; + + return amdgpu_sdma_reset_engine(adev, inst_id); +} + +static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring) +{ + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg; + struct amdgpu_device *adev = ring->adev; + int i, j, r = 0; if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -1495,31 +1518,26 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); - /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ - preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); - preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); - - soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; - - - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); - - udelay(50); - - soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); +static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 inst_id = ring->me; + u32 freeze; + int r; + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* unfreeze and unhalt */ - freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE)); freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze); - r = sdma_v5_2_gfx_resume_instance(adev, i, true); + r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true); -err0: amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return r; } -- 2.50.1 From 574f4b5562cc11da7beb8c14b51a846da8263f89 Mon Sep 17 00:00:00 2001 From: "Jesse.zhang@amd.com" Date: Wed, 16 Apr 2025 14:03:02 +0800 Subject: [PATCH 16/16] drm/amdgpu: optimize queue reset and 
stop logic for sdma_v5_0

This patch refactors the SDMA v5.0 queue reset and stop logic to improve
code readability, maintainability, and performance. The key changes
include:

1. **Generalized `sdma_v5_0_gfx_stop` Function**:
   - Added an `inst_mask` parameter to allow stopping specific SDMA
     instances instead of all instances. This is useful for resetting
     individual queues.

2. **Simplified `sdma_v5_0_reset_queue` Function**:
   - Removed redundant loops and checks by directly using the `ring->me`
     field to identify the SDMA instance.
   - Reused the `sdma_v5_0_gfx_stop` function to stop the queue, reducing
     code duplication.

v1: The general coding style is to declare variables like "i" or "r"
    last. E.g. longest lines first and shortest last. (Christian)

Signed-off-by: Jesse Zhang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 31 ++++++++------------------
 1 file changed, 9 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 48ee6dc1e3dc..9505ae96fbec 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -557,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  * sdma_v5_0_gfx_stop - stop the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
  * Stop the gfx async dma ring buffers (NAVI10).
  */
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
 {
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
+	for_each_inst(i, inst_mask) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -657,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 f32_cntl;
 	int i;
+	uint32_t inst_mask;
 
+	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
 	if (!enable) {
-		sdma_v5_0_gfx_stop(adev);
+		sdma_v5_0_gfx_stop(adev, inst_mask);
 		sdma_v5_0_rlc_stop(adev);
 	}
 
@@ -1546,33 +1548,18 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 
 static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
 {
-	u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg;
+	u32 f32_cntl, freeze, cntl, stat1_reg;
 	struct amdgpu_device *adev = ring->adev;
 	int i, j, r = 0;
 
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (ring == &adev->sdma.instance[i].ring)
-			break;
-	}
-
-	if (i == adev->sdma.num_instances) {
-		DRM_ERROR("sdma instance not found\n");
-		return -EINVAL;
-	}
-
+	i = ring->me;
 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
 
 	/* stop queue */
-	ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
-	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-	WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
-	rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
-	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-	WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+	sdma_v5_0_gfx_stop(adev, 1 << i);
 
 	/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
 	freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, 
mmSDMA0_FREEZE)); -- 2.50.1