From 8aaeb25327ba62443d306238cd621c7fcbe22115 Mon Sep 17 00:00:00 2001
From: Meenakshikumar Somasundaram
Date: Tue, 8 Apr 2025 11:37:58 -0400
Subject: [PATCH 01/16] drm/amd/display: Fix pixel rate divider policy for 1 pixel per cycle config

[Why]
The pixel rate divider was not programmed correctly for the 1 pixel per
cycle configuration in the empty TU (transfer unit) case.

[How]
Included a check for empty TU when the pixel rate divider values are
selected.

Reviewed-by: Michael Strauss
Signed-off-by: Meenakshikumar Somasundaram
Signed-off-by: Zaeem Mohamed
Tested-by: Mark Broadworth
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c  | 4 +++-
 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index cd0adf72b223..a0b05b9ef660 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1181,6 +1181,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	unsigned int odm_combine_factor = 0;
 	bool two_pix_per_container = false;
+	struct dce_hwseq *hws = stream->ctx->dc->hwseq;

 	two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
 	odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1201,7 +1202,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 		} else {
 			*k1_div = PIXEL_RATE_DIV_BY_1;
 			*k2_div = PIXEL_RATE_DIV_BY_4;
-			if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+			if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy &&
+				hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
 				*k2_div = PIXEL_RATE_DIV_BY_2;
 		}
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index a4e6b6479983..58f2be2a326b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
 	.set_mcm_luts = dcn32_set_mcm_luts,
 	.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
 	.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
-	.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+	.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
 	.dsc_pg_control = dcn35_dsc_pg_control,
 	.dsc_pg_status = dcn32_dsc_pg_status,
 	.enable_plane = dcn35_enable_plane,
--
2.51.0

From 87ceff6136dbbfc8d530a1be7e3ba1282548d6dc Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Feb 2025 21:25:22 -0500
Subject: [PATCH 02/16] drm/amdgpu/userq/mes: pass the secure flag to mqd init

So that we initialize the MQD as a secure queue.
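The flag plumbing is a one-liner per engine type: the create-IOCTL flag
bit is masked into the boolean tmz_queue property that MQD init
consumes. Below is a minimal user-space sketch of that pattern; the
flag bit value and the struct are placeholders for illustration, not
the real amdgpu UAPI definitions.

#include <stdbool.h>
#include <stdio.h>

#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1u << 2) /* placeholder bit */

struct userq_props_sketch {
	bool tmz_queue; /* queue backing memory lives in a TMZ (encrypted) region */
};

/* Mirrors `userq_props->tmz_queue = mqd_user->flags & ..._QUEUE_SECURE;`
 * from the hunks below: assigning a masked value to a bool is
 * well-defined in C, any non-zero result converts to true. */
static void fill_props(struct userq_props_sketch *props, unsigned int flags)
{
	props->tmz_queue = flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
}

int main(void)
{
	struct userq_props_sketch props = { 0 };

	fill_props(&props, AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE);
	printf("tmz_queue = %d\n", props.tmz_queue); /* prints 1 */
	return 0;
}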
Reviewed-by: Sunil Khatri Reviewed-by: Jesse.Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 3b45ab19fc3f..b3157df8ae10 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -266,6 +266,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM; userq_props->hqd_active = false; + userq_props->tmz_queue = + mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; kfree(compute_mqd); } else if (queue->queue_type == AMDGPU_HW_IP_GFX) { struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11; @@ -285,6 +287,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->shadow_addr = mqd_gfx_v11->shadow_va; userq_props->csa_addr = mqd_gfx_v11->csa_va; + userq_props->tmz_queue = + mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; kfree(mqd_gfx_v11); } else if (queue->queue_type == AMDGPU_HW_IP_DMA) { struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11; -- 2.51.0 From 4ec2141d23d3bbede9347a60580a40d9e11089ff Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Feb 2025 21:33:01 -0500 Subject: [PATCH 03/16] drm/amdgpu/userq: enable support for secure queues Enable users to create secure GFX/compute queues. Reviewed-by: Sunil Khatri Tested-by: Jesse.Zhang Reviewed-by: Jesse.Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 5f87cc8b5bf4..4e02d6cc66b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -311,6 +311,14 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args) if (r) return r; + if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && + (args->in.ip_type != AMDGPU_HW_IP_GFX) && + (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && + !amdgpu_is_tmz(adev)) { + drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n"); + return -EINVAL; + } + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n"); @@ -424,7 +432,8 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data, switch (args->in.op) { case AMDGPU_USERQ_OP_CREATE: - if (args->in.flags & ~AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) + if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | + AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE)) return -EINVAL; r = amdgpu_userqueue_create(filp, args); if (r) -- 2.51.0 From 4b27406380b0b9ada6b4893bc8f6766dd34fff36 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Fri, 11 Apr 2025 15:08:30 +0530 Subject: [PATCH 04/16] drm/amdgpu: Add queue id support to the user queue wait IOCTL MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Add queue id support to the user queue wait IOCTL drm_amdgpu_userq_wait structure. This is required to retrieve the wait user queue and maintain the fence driver references in it so that the user queue in the same context releases their reference to the fence drivers at some point before queue destruction. 
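The per-wait bookkeeping this enables looks roughly like the sketch
below, a kernel-context illustration mirroring the xa_alloc() hunk in
this patch rather than the literal driver code: each fence driver the
wait depends on is stored in the waiting queue's xarray together with a
held reference, so the queue can drop all of them in one pass before it
is destroyed.

/* Illustrative sketch of the pattern in the diff below. */
static int track_fence_drv_sketch(struct amdgpu_usermode_queue *waitq,
				  struct amdgpu_userq_fence_driver *fence_drv)
{
	u32 index;
	int r;

	/* xa_alloc() picks a free 32-bit index and stores the pointer. */
	r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
		     xa_limit_32b, GFP_KERNEL);
	if (r)
		return r;

	/* Hold a reference until the queue tears down its xarray. */
	amdgpu_userq_fence_driver_get(fence_drv);
	return 0;
}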
Otherwise, we would gather those references until we don't have any more space left and crash. v2: Modify the UAPI comment as per the mesa and libdrm UAPI comment. Libdrm MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/408 Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34493 Signed-off-by: Arunpravin Paneer Selvam Suggested-by: Christian König Reviewed-by: Christian König Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 20 +++++++++++-------- .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 1 - include/uapi/drm/amdgpu_drm.h | 6 ++++++ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index 0a3032e01c34..ca198360cfda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -91,7 +91,6 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev, spin_lock_init(&fence_drv->fence_list_lock); fence_drv->adev = adev; - fence_drv->fence_drv_xa_ptr = &userq->fence_drv_xa; fence_drv->context = dma_fence_context_alloc(1); get_task_comm(fence_drv->timeline_name, current); @@ -611,6 +610,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, u32 num_syncobj, num_read_bo_handles, num_write_bo_handles; struct drm_amdgpu_userq_fence_info *fence_info = NULL; struct drm_amdgpu_userq_wait *wait_info = data; + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr; + struct amdgpu_usermode_queue *waitq; struct drm_gem_object **gobj_write; struct drm_gem_object **gobj_read; struct dma_fence **fences = NULL; @@ -860,6 +862,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, fences[num_fences++] = fence; } + waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id); + if (!waitq) + goto free_fences; + for (i = 0, cnt = 0; i < num_fences; i++) { struct amdgpu_userq_fence_driver *fence_drv; struct amdgpu_userq_fence *userq_fence; @@ -888,14 +894,12 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, * Otherwise, we would gather those references until we don't * have any more space left and crash. 
*/ - if (fence_drv->fence_drv_xa_ptr) { - r = xa_alloc(fence_drv->fence_drv_xa_ptr, &index, fence_drv, - xa_limit_32b, GFP_KERNEL); - if (r) - goto free_fences; + r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv, + xa_limit_32b, GFP_KERNEL); + if (r) + goto free_fences; - amdgpu_userq_fence_driver_get(fence_drv); - } + amdgpu_userq_fence_driver_get(fence_drv); /* Store drm syncobj's gpu va address and value */ fence_info[cnt].va = fence_drv->va; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h index 7bbae238cca0..2af4e0c15773 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h @@ -55,7 +55,6 @@ struct amdgpu_userq_fence_driver { spinlock_t fence_list_lock; struct list_head fences; struct amdgpu_device *adev; - struct xarray *fence_drv_xa_ptr; char timeline_name[TASK_COMM_LEN]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 4e02d6cc66b5..8f6510b78dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -51,7 +51,6 @@ amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, } uq_funcs->mqd_destroy(uq_mgr, queue); - queue->fence_drv->fence_drv_xa_ptr = NULL; amdgpu_userq_fence_driver_free(queue); idr_remove(&uq_mgr->userq_idr, queue_id); kfree(queue); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 284ac25ab5c4..1fd96474e64c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -515,6 +515,12 @@ struct drm_amdgpu_userq_fence_info { }; struct drm_amdgpu_userq_wait { + /** + * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the + * wait queue and maintain the fence driver references in it. + */ + __u32 waitq_id; + __u32 pad; /** * @syncobj_handles: The list of syncobj handles submitted by the user queue * job to get the va/value pairs. -- 2.51.0 From 11772eb73bb75558b530162fc0eb669e5bbc1d19 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 Apr 2025 16:43:52 -0400 Subject: [PATCH 05/16] drm/amdgpu/userq: add a helper to check which IPs are enabled Add a helper to get a mask of IPs which support user queues. Use this in the INFO IOCTL to get the IP mask to replace the current code. Reviewed-by: Prike Liang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7 +------ drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h | 2 ++ 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 3d319687c1c9..151366ecc0af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1009,12 +1009,7 @@ out: } } - if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) - dev_info->userq_ip_mask |= (1 << AMDGPU_HW_IP_GFX); - if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) - dev_info->userq_ip_mask |= (1 << AMDGPU_HW_IP_COMPUTE); - if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) - dev_info->userq_ip_mask |= (1 << AMDGPU_HW_IP_DMA); + dev_info->userq_ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); ret = copy_to_user(out, dev_info, min((size_t)size, sizeof(*dev_info))) ? 
-EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 8f6510b78dee..59488acd89fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -31,6 +31,19 @@ #include "amdgpu_userqueue.h" #include "amdgpu_userq_fence.h" +u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev) +{ + int i; + u32 userq_ip_mask = 0; + + for (i = 0; i < AMDGPU_HW_IP_NUM; i++) { + if (adev->userq_funcs[i]) + userq_ip_mask |= (1 << i); + } + + return userq_ip_mask; +} + static void amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h index b2da513b3d02..b49f147eb69c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h @@ -113,6 +113,8 @@ uint64_t amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_db_info *db_info, struct drm_file *filp); +u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev); + int amdgpu_userq_suspend(struct amdgpu_device *adev); int amdgpu_userq_resume(struct amdgpu_device *adev); -- 2.51.0 From 19e743f0fb730e4dac43e37bb71d1ebdb622b3cf Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Tue, 1 Apr 2025 12:06:35 -0400 Subject: [PATCH 06/16] drm/amd/display: Refactor SubVP cursor limiting logic [WHY] There are several gaps that can result in SubVP being enabled with incompatible HW cursor sizes, and unjust restrictions to cursor size due to wrong predictions on future usage of SubVP [HOW] - remove "prediction" logic in favor of tagging based on previous SubVP usage - block SubVP if current HW cursor settings are incompatible - provide interface for DM to determine if HW cursor should be disabled due to an attempt to enable SubVP Reviewed-by: Alvin Lee Signed-off-by: Dillon Varone Signed-off-by: Zaeem Mohamed Tested-by: Mark Broadworth Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 52 ++++++++-- .../gpu/drm/amd/display/dc/core/dc_debug.c | 2 + .../gpu/drm/amd/display/dc/core/dc_resource.c | 48 ++-------- .../gpu/drm/amd/display/dc/core/dc_state.c | 96 +++++++++++++++++++ .../gpu/drm/amd/display/dc/core/dc_stream.c | 70 +++++++++++--- drivers/gpu/drm/amd/display/dc/dc.h | 4 + .../gpu/drm/amd/display/dc/dc_state_priv.h | 20 ++++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 11 +++ .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1 + .../dc/dml2/dml21/dml21_translation_helper.c | 9 +- .../amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 2 +- .../amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 2 +- .../gpu/drm/amd/display/dc/inc/core_status.h | 3 + .../gpu/drm/amd/display/dc/inc/core_types.h | 5 +- drivers/gpu/drm/amd/display/dc/inc/resource.h | 2 - .../dc/resource/dce100/dce100_resource.c | 4 +- .../dc/resource/dce110/dce110_resource.c | 4 +- .../dc/resource/dce112/dce112_resource.c | 4 +- .../dc/resource/dce112/dce112_resource.h | 2 +- .../dc/resource/dce60/dce60_resource.c | 4 +- .../dc/resource/dce80/dce80_resource.c | 4 +- .../dc/resource/dcn10/dcn10_resource.c | 5 +- .../dc/resource/dcn20/dcn20_resource.c | 6 +- .../dc/resource/dcn20/dcn20_resource.h | 2 +- .../dc/resource/dcn21/dcn21_resource.c | 6 +- .../dc/resource/dcn30/dcn30_resource.c | 4 +- .../dc/resource/dcn30/dcn30_resource.h | 2 +- .../dc/resource/dcn31/dcn31_resource.c | 4 +- .../dc/resource/dcn31/dcn31_resource.h | 2 +- 
.../dc/resource/dcn314/dcn314_resource.c | 4 +- .../dc/resource/dcn314/dcn314_resource.h | 2 +- .../dc/resource/dcn32/dcn32_resource.c | 65 +++++++++++-- .../dc/resource/dcn32/dcn32_resource.h | 6 +- .../dc/resource/dcn321/dcn321_resource.c | 3 +- .../dc/resource/dcn35/dcn35_resource.c | 6 +- .../dc/resource/dcn351/dcn351_resource.c | 6 +- .../dc/resource/dcn36/dcn36_resource.c | 6 +- .../dc/resource/dcn401/dcn401_resource.c | 49 ++++++++-- .../dc/resource/dcn401/dcn401_resource.h | 2 +- 39 files changed, 400 insertions(+), 129 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 66e23507eb82..3f56f9a1250c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -37,6 +37,7 @@ #include "dc_state.h" #include "dc_state_priv.h" #include "dc_plane_priv.h" +#include "dc_stream_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -2886,7 +2887,7 @@ static enum surface_update_type check_update_surfaces_for_stream( int i; enum surface_update_type overall_type = UPDATE_TYPE_FAST; - if (dc->idle_optimizations_allowed) + if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) overall_type = UPDATE_TYPE_FULL; if (stream_status == NULL || stream_status->plane_count != surface_count) @@ -3290,7 +3291,7 @@ static void copy_stream_update_to_stream(struct dc *dc, if (dsc_validate_context) { stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; - if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; update->dsc_config = NULL; @@ -4608,7 +4609,7 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) { /* prevent underflow and corruption when reconfiguring pipes */ force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { @@ -5128,7 +5129,7 @@ static bool update_planes_and_stream_v1(struct dc *dc, copy_stream_update_to_stream(dc, context, stream, stream_update); if (update_type >= UPDATE_TYPE_FULL) { - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { DC_ERROR("Mode validation failed for stream update!\n"); dc_state_release(context); return false; @@ -6272,15 +6273,22 @@ bool dc_abm_save_restore( void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) { unsigned int i; - bool subvp_sw_cursor_req = false; + unsigned int max_cursor_size = dc->caps.max_cursor_size; + unsigned int stream_cursor_size; - for (i = 0; i < dc->current_state->stream_count; i++) { - if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) { - subvp_sw_cursor_req = true; - break; + if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { + for (i = 0; i < dc->current_state->stream_count; i++) { + stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, + dc->current_state, + dc->current_state->streams[i]); + + if 
(stream_cursor_size < max_cursor_size) { + max_cursor_size = stream_cursor_size; + } } } - properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size; + + properties->cursor_size_limit = max_cursor_size; } /** @@ -6346,3 +6354,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) else return 0; } + +bool dc_is_cursor_limit_pending(struct dc *dc) +{ + uint32_t i; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i])) + return true; + } + + return false; +} + +bool dc_can_clear_cursor_limit(struct dc *dc) +{ + uint32_t i; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state)) + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 650e89825968..7551d0a3fe82 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -266,6 +266,8 @@ char *dc_status_to_str(enum dc_status status) return "Fail dp payload allocation"; case DC_FAIL_DP_LINK_BANDWIDTH: return "Insufficient DP link bandwidth"; + case DC_FAIL_HW_CURSOR_SUPPORT: + return "HW Cursor not supported"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 313a32248cd7..13559fe56062 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1342,32 +1342,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) data->viewport_c.y += src.y / vpc_div; } -static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream) -{ - uint32_t refresh_rate; - struct dc *dc = stream->ctx->dc; - - refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 + - stream->timing.v_total * stream->timing.h_total - (uint64_t)1); - refresh_rate = div_u64(refresh_rate, stream->timing.v_total); - refresh_rate = div_u64(refresh_rate, stream->timing.h_total); - - /* If there's any stream that fits the SubVP high refresh criteria, - * we must return true. This is because cursor updates are asynchronous - * with full updates, so we could transition into a SubVP config and - * remain in HW cursor mode if there's no cursor update which will - * then cause corruption. 
- */ - if ((refresh_rate >= 120 && refresh_rate <= 175 && - stream->timing.v_addressable >= 1080 && - stream->timing.v_addressable <= 2160) && - (dc->current_state->stream_count > 1 || - (dc->current_state->stream_count == 1 && !stream->allow_freesync))) - return true; - - return false; -} - static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern( enum dp_test_pattern test_pattern) { @@ -4259,6 +4233,11 @@ enum dc_status dc_validate_with_context(struct dc *dc, } } + /* clear subvp cursor limitations */ + for (i = 0; i < context->stream_count; i++) { + dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false); + } + res = dc_validate_global_state(dc, context, fast_validate); /* calculate pixel rate divider after deciding pxiel clock & odm combine */ @@ -4385,8 +4364,7 @@ enum dc_status dc_validate_global_state( result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) - if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) - result = DC_FAIL_BANDWIDTH_VALIDATE; + result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate); return result; } @@ -5538,20 +5516,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, return DC_OK; } -bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream) -{ - if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream)) - return true; - if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && - ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) - return true; - else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 && - ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) - return true; - - return false; -} - struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx) { return &pipe_ctx->plane_res.scl_data.dscl_prog_data; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 1b2cce127981..8800ea512a21 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include "dc_types.h" #include "core_types.h" #include "core_status.h" #include "dc_state.h" @@ -812,8 +813,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc, if (phantom_stream_status) { phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM; phantom_stream_status->mall_stream_config.paired_stream = main_stream; + phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false; + phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false; } + dc_state_set_stream_subvp_cursor_limit(main_stream, state, true); + return res; } @@ -977,3 +982,94 @@ bool dc_state_is_fams2_in_use( return is_fams2_in_use; } + +void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit) +{ + struct dc_stream_status *stream_status; + + stream_status = dc_state_get_stream_status(state, stream); + + if (stream_status) { + stream_status->mall_stream_config.subvp_limit_cursor_size = limit; + } +} + +bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state) +{ + bool limit = false; + + struct dc_stream_status *stream_status; + + stream_status = 
dc_state_get_stream_status(state, stream);
+
+	if (stream_status) {
+		limit = stream_status->mall_stream_config.subvp_limit_cursor_size;
+	}
+
+	return limit;
+}
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+		struct dc_state *state,
+		bool limit)
+{
+	struct dc_stream_status *stream_status;
+
+	stream_status = dc_state_get_stream_status(state, stream);
+
+	if (stream_status) {
+		stream_status->mall_stream_config.cursor_size_limit_subvp = limit;
+	}
+}
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+		struct dc_state *state)
+{
+	bool limit = false;
+
+	struct dc_stream_status *stream_status;
+
+	stream_status = dc_state_get_stream_status(state, stream);
+
+	if (stream_status) {
+		limit = stream_status->mall_stream_config.cursor_size_limit_subvp;
+	}
+
+	return limit;
+}
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+		struct dc_state *state)
+{
+	bool can_clear_limit = false;
+
+	struct dc_stream_status *stream_status;
+
+	stream_status = dc_state_get_stream_status(state, stream);
+
+	if (stream_status) {
+		can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) &&
+				(stream_status->mall_stream_config.type == SUBVP_PHANTOM ||
+				stream->hw_cursor_req ||
+				!stream_status->mall_stream_config.subvp_limit_cursor_size ||
+				!stream->cursor_position.enable ||
+				dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes));
+	}
+
+	return can_clear_limit;
+}
+
+bool dc_state_is_subvp_in_use(struct dc_state *state)
+{
+	uint32_t i;
+
+	for (i = 0; i < state->stream_count; i++) {
+		if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE)
+			return true;
+	}
+
+	return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0478dd856d8c..e6e41678525f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -265,13 +265,16 @@ void program_cursor_attributes(
 }

 /*
- * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address
 */
-bool dc_stream_set_cursor_attributes(
-	struct dc_stream_state *stream,
+bool dc_stream_check_cursor_attributes(
+	const struct dc_stream_state *stream,
+	struct dc_state *state,
 	const struct dc_cursor_attributes *attributes)
 {
-	struct dc *dc;
+	const struct dc *dc;
+
+	unsigned int max_cursor_size;

 	if (NULL == stream) {
 		dm_error("DC: dc_stream is NULL!\n");
@@ -289,24 +292,38 @@ bool dc_stream_set_cursor_attributes(

 	dc = stream->ctx->dc;

-	/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
-	 * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
-	 * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs)
-	 * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
-	 * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
+	/* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM.
+	 * Therefore, if cursor is greater than this, fallback to SW cursor.
*/ - if (dc->debug.allow_sw_cursor_fallback && - attributes->height * attributes->width * 4 > 16384 && - !stream->hw_cursor_req) { - if (check_subvp_sw_cursor_fallback_req(dc, stream)) + if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { + max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream); + max_cursor_size = max_cursor_size * max_cursor_size * 4; + + if (attributes->height * attributes->width * 4 > max_cursor_size) { return false; + } } - stream->cursor_attributes = *attributes; - return true; } +/* + * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address + */ +bool dc_stream_set_cursor_attributes( + struct dc_stream_state *stream, + const struct dc_cursor_attributes *attributes) +{ + bool result = false; + + if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) { + stream->cursor_attributes = *attributes; + result = true; + } + + return result; +} + bool dc_stream_program_cursor_attributes( struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes) @@ -1109,3 +1126,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false); } + +bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream) +{ + bool is_limit_pending = false; + + if (dc->current_state) + is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state); + + return is_limit_pending; +} + +bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream) +{ + bool can_clear_limit = false; + + if (dc->current_state) + can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) && + (stream->hw_cursor_req || + !stream->cursor_position.enable || + dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes)); + + return can_clear_limit; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a5592558fa7e..4e7423cfe799 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -249,6 +249,7 @@ struct dc_caps { uint32_t i2c_speed_in_khz_hdcp; uint32_t dmdata_alloc_size; unsigned int max_cursor_size; + unsigned int max_buffered_cursor_size; unsigned int max_video_width; /* * max video plane width that can be safely assumed to be always @@ -2607,4 +2608,7 @@ void dc_disable_accelerated_mode(struct dc *dc); bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); +bool dc_is_cursor_limit_pending(struct dc *dc); +bool dc_can_clear_cursor_limit(struct dc *dc); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h index 1a12ef579ff4..1d9bae56ff6a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use( const struct dc *dc, const struct dc_state *state); + +void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit); + +bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state); + +void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit); + +bool 
dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state); + +bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state); + +bool dc_state_is_subvp_in_use(struct dc_state *state); + #endif /* _DC_STATE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e0bfddaa23e3..341d2ffb64b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -44,6 +44,8 @@ struct mall_stream_config { */ enum mall_stream_type type; struct dc_stream_state *paired_stream; // master / slave stream + bool subvp_limit_cursor_size; /* stream has/is using subvp limiting hw cursor support */ + bool cursor_size_limit_subvp; /* stream is using hw cursor config preventing subvp */ }; struct dc_stream_status { @@ -503,6 +505,11 @@ void program_cursor_position( struct dc *dc, struct dc_stream_state *stream); +bool dc_stream_check_cursor_attributes( + const struct dc_stream_state *stream, + struct dc_state *state, + const struct dc_cursor_attributes *attributes); + bool dc_stream_set_cursor_attributes( struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes); @@ -579,4 +586,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc, struct dc_stream_state *stream, struct dc_surface_update *srf_updates, struct dc_state *context); + +bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream); +bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream); + #endif /* DC_STREAM_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 56dda686e299..b0fc1fd20208 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -627,6 +627,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, */ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) && !pipe->stream->hw_cursor_req && + !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index f721aabdd470..ed75319a07d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -526,7 +526,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp static void populate_dml21_stream_overrides_from_stream_state( struct dml2_stream_parameters *stream_desc, - struct dc_stream_state *stream) + struct dc_stream_state *stream, + struct dc_stream_status *stream_status) { switch (stream->debug.force_odm_combine_segments) { case 0: @@ -551,7 +552,9 @@ static void populate_dml21_stream_overrides_from_stream_state( if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy || stream->debug.force_odm_combine_segments > 0) stream_desc->overrides.disable_dynamic_odm = true; - stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req; + 
stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || + stream->hw_cursor_req || + stream_status->mall_stream_config.cursor_size_limit_subvp; } static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode) @@ -1024,7 +1027,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); - populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]); + populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]); dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = dml2_twait_budgeting_setting_if_needed; dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 846c9c51f2d9..5e78b553adbd 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2482,7 +2482,7 @@ bool dcn20_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) return false; /* apply updated bandwidth parameters */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 423c0d28218b..8611eb9607df 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -2650,7 +2650,7 @@ bool dcn401_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) return false; /* apply updated bandwidth parameters */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index b5afd8c3103d..f3696143590c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -26,6 +26,8 @@ #ifndef _CORE_STATUS_H_ #define _CORE_STATUS_H_ +#include "dc_hw_types.h" + enum dc_status { DC_OK = 1, @@ -56,6 +58,7 @@ enum dc_status { DC_NO_LINK_ENC_RESOURCE = 26, DC_FAIL_DP_PAYLOAD_ALLOCATION = 27, DC_FAIL_DP_LINK_BANDWIDTH = 28, + DC_FAIL_HW_CURSOR_SUPPORT = 29, DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 1ed3461440aa..bae98d994275 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -79,7 +79,7 @@ struct resource_funcs { * associated with it. */ struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id); - bool (*validate_bandwidth)( + enum dc_status (*validate_bandwidth)( struct dc *dc, struct dc_state *context, bool fast_validate); @@ -218,6 +218,9 @@ struct resource_funcs { int (*get_power_profile)(const struct dc_state *context); unsigned int (*get_det_buffer_size)(const struct dc_state *context); unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx); + unsigned int (*get_max_hw_cursor_size)(const struct dc *dc, + struct dc_state *state, + const struct dc_stream_state *stream); }; struct audio_support{ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 9458187b834d..7a87a7c07c1b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -628,8 +628,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); -bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); - /* Get hw programming parameters container from pipe context * @pipe_ctx: pipe context * @dscl_prog_data: struct to hold programmable hw reg values diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index e0c64861eff3..84f73fdb0f95 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -836,7 +836,7 @@ static enum dc_status build_mapped_resource( return DC_OK; } -static bool dce100_validate_bandwidth( +static enum dc_status dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) @@ -858,7 +858,7 @@ static bool dce100_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz = 0; } - return true; + return DC_OK; } static bool dce100_validate_surface_sets( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index 035c6cfdaee5..f3d5baac11bf 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -960,7 +960,7 @@ static enum dc_status build_mapped_resource( return DC_OK; } -static bool dce110_validate_bandwidth( +static enum dc_status dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) @@ -1031,7 +1031,7 @@ static bool dce110_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } - return result; + return result ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 480a50967385..4225cae68c10 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -883,7 +883,7 @@ static enum dc_status build_mapped_resource( return DC_OK; } -bool dce112_validate_bandwidth( +enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) @@ -952,7 +952,7 @@ bool dce112_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } - return result; + return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } enum dc_status resource_map_phy_clock_resources( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 1f57ebc6f9b4..6221d749246d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h @@ -42,7 +42,7 @@ enum dc_status dce112_validate_with_context( struct dc_state *context, struct dc_state *old_context); -bool dce112_validate_bandwidth( +enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 737c1b1d861a..d9ffdded5ce1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -863,7 +863,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool) } } -static bool dce60_validate_bandwidth( +static enum dc_status dce60_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) @@ -885,7 +885,7 @@ static bool dce60_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz = 0; } - return true; + return DC_OK; } static bool dce60_validate_surface_sets( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 3d5113f010bb..bd5811f97531 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -869,7 +869,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool) } } -static bool dce80_validate_bandwidth( +static enum dc_status dce80_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) @@ -891,7 +891,7 @@ static bool dce80_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz = 0; } - return true; + return DC_OK; } static bool dce80_validate_surface_sets( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index e92f14d50adb..9d8ecc6229d9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -23,6 +23,7 @@ * */ +#include "core_status.h" #include "dm_services.h" #include "dc.h" @@ -1125,7 +1126,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) *pool = NULL; } -static bool dcn10_validate_bandwidth( +static enum 
dc_status dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) @@ -1136,7 +1137,7 @@ static bool dcn10_validate_bandwidth( voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); DC_FP_END(); - return voltage_supported; + return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index e4eca3e32c1b..3405be07f5e3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2124,7 +2124,7 @@ validate_out: return out; } -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool voltage_supported; @@ -2132,14 +2132,14 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); if (!pipes) - return false; + return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); DC_FP_END(); kfree(pipes); - return voltage_supported; + return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index 4cee3fa11a7f..c0e062c7407d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params( struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 4bd5c2278596..9ab01b65b177 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -923,7 +923,7 @@ validate_out: * with DC_FP_START()/DC_FP_END(). Use the same approach as for * dcn20_validate_bandwidth in dcn20_resource.c. */ -static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, +static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool voltage_supported; @@ -931,14 +931,14 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); if (!pipes) - return false; + return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); DC_FP_END(); kfree(pipes); - return voltage_supported; + return voltage_supported ? 
DC_OK : DC_NOT_SUPPORTED; } static void dcn21_destroy_resource_pool(struct resource_pool **pool) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index f040c22db59b..f631ae34e320 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -2035,7 +2035,7 @@ void dcn30_calculate_wm_and_dlg( DC_FP_END(); } -bool dcn30_validate_bandwidth(struct dc *dc, +enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { @@ -2092,7 +2092,7 @@ validate_out: BW_VAL_TRACE_FINISH(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 8e6b8b7368fd..689d9bdace81 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h @@ -56,7 +56,7 @@ unsigned int dcn30_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); -bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, +enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); bool dcn30_internal_validate_bw( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index dddddbfef85f..7e0af5297dc4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1758,7 +1758,7 @@ dcn31_set_mcif_arb_params(struct dc *dc, DC_FP_END(); } -bool dcn31_validate_bandwidth(struct dc *dc, +enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { @@ -1813,7 +1813,7 @@ validate_out: BW_VAL_TRACE_FINISH(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 551ad912f7be..dd82815d7efe 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -37,7 +37,7 @@ struct dcn31_resource_pool { struct resource_pool base; }; -bool dcn31_validate_bandwidth(struct dc *dc, +enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); void dcn31_calculate_wm_and_dlg( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 26becc4cb804..d96bc6cb73ad 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -1694,7 +1694,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi *panel_config = panel_config_defaults; } -bool dcn314_validate_bandwidth(struct dc *dc, +enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { @@ -1750,7 +1750,7 @@ validate_out: BW_VAL_TRACE_FINISH(); - return out; + return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static struct resource_funcs dcn314_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index 49ffe71018df..f8ba531d6342 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h @@ -39,7 +39,7 @@ struct dcn314_resource_pool { struct resource_pool base; }; -bool dcn314_validate_bandwidth(struct dc *dc, +enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 2a59cc61ed8c..6dda862b47e3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -24,6 +24,7 @@ * */ +#include "dc_types.h" #include "dm_services.h" #include "dc.h" @@ -1806,19 +1807,56 @@ validate_out: return out; } -bool dcn32_validate_bandwidth(struct dc *dc, +enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { - bool out = false; + unsigned int i; + enum dc_status status; + const struct dc_stream_state *stream; + + /* reset cursor limitations on subvp */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) { + dc_state_set_stream_cursor_subvp_limit(stream, context, false); + } + } if (dc->debug.using_dml2) - out = dml2_validate(dc, context, + status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - out = dml1_validate(dc, context, fast_validate); - return out; + status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + + if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + /* check new stream configuration still supports cursor if subvp used */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM && + stream->cursor_position.enable && + !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) { + /* hw cursor cannot be supported with subvp active, so disable subvp for now */ + dc_state_set_stream_cursor_subvp_limit(stream, context, true); + status = DC_FAIL_HW_CURSOR_SUPPORT; + } + }; + } + + if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + /* attempt to validate again with subvp disabled due to cursor */ + if (dc->debug.using_dml2) + status = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + else + status = dml1_validate(dc, context, fast_validate) ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + } + + return status; } int dcn32_populate_dml_pipes_from_context( @@ -2042,6 +2080,18 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw DC_FP_END(); } +unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, + struct dc_state *state, + const struct dc_stream_state *stream) +{ + bool limit_cur_to_buf; + + limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) && + !stream->hw_cursor_req; + + return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size; +} + static struct resource_funcs dcn32_res_pool_funcs = { .destroy = dcn32_destroy_resource_pool, .link_enc_create = dcn32_link_encoder_create, @@ -2067,7 +2117,8 @@ static struct resource_funcs dcn32_res_pool_funcs = { .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 1aa4ced29291..d60ed77eda80 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -98,7 +98,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, unsigned int pipe_cnt, unsigned int index); -bool dcn32_validate_bandwidth(struct dc *dc, +enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); @@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes); +unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, + struct dc_state *state, + const struct dc_stream_state *stream); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 38d76434683e..3b91b7379cf3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1624,7 +1624,8 @@ static struct resource_funcs dcn321_res_pool_funcs = { .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index ffd2b816cd02..fb91209a06e8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1732,7 +1732,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config } -static bool dcn35_validate_bandwidth(struct dc *dc, +static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct 
dc_state *context, bool fast_validate) { @@ -1743,13 +1743,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc, fast_validate); if (fast_validate) - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); dcn35_decide_zstate_support(dc, context); DC_FP_END(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 98f5bc1b929e..989a270f7dea 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1712,7 +1712,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config } -static bool dcn351_validate_bandwidth(struct dc *dc, +static enum dc_status dcn351_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { @@ -1723,13 +1723,13 @@ static bool dcn351_validate_bandwidth(struct dc *dc, fast_validate); if (fast_validate) - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); dcn35_decide_zstate_support(dc, context); DC_FP_END(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static struct resource_funcs dcn351_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index b6468573dc33..96c8288fb7fa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -1713,7 +1713,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config } -static bool dcn35_validate_bandwidth(struct dc *dc, +static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { @@ -1724,13 +1724,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc, fast_validate); if (fast_validate) - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); dcn35_decide_zstate_support(dc, context); DC_FP_END(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 7436dfbdf927..5b7148bb1701 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -1642,16 +1642,52 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta return DC_OK; } -bool dcn401_validate_bandwidth(struct dc *dc, +enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { - bool out = false; + unsigned int i; + enum dc_status status = DC_OK; + const struct dc_stream_state *stream; + + /* reset cursor limitations on subvp */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) { + dc_state_set_stream_cursor_subvp_limit(stream, context, false); + } + } + if (dc->debug.using_dml2) - out = dml2_validate(dc, context, + status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); - return out; + fast_validate) ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + + if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + /* check new stream configuration still supports cursor if subvp used */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM && + stream->cursor_position.enable && + !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) { + /* hw cursor cannot be supported with subvp active, so disable subvp for now */ + dc_state_set_stream_cursor_subvp_limit(stream, context, true); + status = DC_FAIL_HW_CURSOR_SUPPORT; + } + }; + } + + if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + /* attempt to validate again with subvp disabled due to cursor */ + if (dc->debug.using_dml2) + status = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + } + + return status; } void dcn401_prepare_mcache_programming(struct dc *dc, @@ -1770,7 +1806,8 @@ static struct resource_funcs dcn401_res_pool_funcs = { .build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, .get_power_profile = dcn401_get_power_profile, - .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe, + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size }; static uint32_t read_pipe_fuses(struct dc_context *ctx) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 4c259745d519..dc52a30991af 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -22,7 +22,7 @@ struct resource_pool *dcn401_create_resource_pool( enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state); -bool dcn401_validate_bandwidth(struct dc *dc, +enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); -- 2.51.0 From 33bc89949b4366dff2dca30bc61ba1c0cbcd2ab2 Mon Sep 17 00:00:00 2001 From: TungYu Lu Date: Fri, 11 Apr 2025 10:41:48 +0800 Subject: [PATCH 07/16] drm/amd/display: Correct prefetch calculation [Why] The minimum value of dst_y_prefetch_equ was not correct in the prefetch calculation, which causes OPTC underflow. [How] Add the min operation of dst_y_prefetch_equ in the prefetch calculation for legacy DML.
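For reference, the 63.75 ceiling follows directly from the register format: DST_Y_PREFETCH is a U6.2 fixed-point field, i.e. 6 integer bits and 2 fractional bits, so the largest encodable value is (2^8 - 1) / 2^2 = 255 / 4 = 63.75. A minimal sketch of the clamp (the helper name is illustrative only; the patch itself uses dml_min() inline):

    /* U6.2: 6 integer + 2 fractional bits -> max = 255 / 4 = 63.75 */
    static inline double clamp_dst_y_prefetch_u6_2(double v)
    {
            return v > 63.75 ? 63.75 : v; /* dml_min(v, 63.75) in the patch */
    }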
Reviewed-by: Nicholas Kazlauskas Signed-off-by: TungYu Lu Signed-off-by: Zaeem Mohamed Tested-by: Mark Broadworth Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 1 + drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1 + drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index f1fe49401bc0..8d24763938ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule( dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC); Tsw_oto = Lsw_oto * LineTime; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index f567a9023682..ed59c77bc6f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule( Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0; dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto; dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0; Tpre_rounded = dst_y_prefetch_equ * LineTime; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 5865e8fa2d8e..9f3938a50240 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule( Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0; dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto; dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0; Tpre_rounded = dst_y_prefetch_equ * LineTime; -- 2.51.0 From 20232192a5044d1f66dcbef0a24de1bb8157738d Mon Sep 17 00:00:00 2001 From: Gergo Koteles Date: Wed, 2 Apr 2025 19:03:31 +0200 Subject: [PATCH 08/16] drm/amd/display: do not copy invalid CRTC timing info Since b255ce4388e0, it is possible that the CRTC timing information for the preferred mode has not yet been calculated while amdgpu_dm_connector_mode_valid() is running. In this case use the CRTC timing information of the actual mode. 
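A condensed sketch of the guard this patch adds, assuming (per the commit message) that the crtc_* fields of a mode remain zero until they have been calculated, so a zero crtc_clock marks timing that is not yet valid; modes_match() below is a stand-in for the clock/htotal/vtotal comparison, not a real helper:

    if (scale_enabled || modes_match(native_mode, drm_mode)) {
            /* only copy CRTC timing that has actually been calculated */
            if (native_mode->crtc_clock)
                    copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
    }
    /* otherwise keep the CRTC timing of drm_mode itself */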
Fixes: b255ce4388e0 ("drm/amdgpu: don't change mode in amdgpu_dm_connector_mode_valid()") Closes: https://lore.kernel.org/all/ed09edb167e74167a694f4854102a3de6d2f1433.camel@irl.hu/ Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4085 Signed-off-by: Gergo Koteles Reviewed-by: Zaeem Mohamed Tested-by: Mark Broadworth Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f26066817fcb..31a5b8fc4dc4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6588,12 +6588,12 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, const struct drm_display_mode *native_mode, bool scale_enabled) { - if (scale_enabled) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); - } else if (native_mode->clock == drm_mode->clock && - native_mode->htotal == drm_mode->htotal && - native_mode->vtotal == drm_mode->vtotal) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); + if (scale_enabled || ( + native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal)) { + if (native_mode->crtc_clock) + copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else { /* no scaling nor amdgpu inserted, no need to patch */ } -- 2.51.0 From 696e8fa3547394d50e387923ba220fbdb6347b6d Mon Sep 17 00:00:00 2001 From: Xiang Liu Date: Fri, 18 Apr 2025 15:13:44 +0800 Subject: [PATCH 09/16] drm/amdgpu: Print kernel message when error logged by scrub Print a kernel message when the scrub bit of status register is set to indicate that errors are being logged by the scrub. Signed-off-by: Xiang Liu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index b4ad163f42a7..3835f2592914 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -120,6 +120,9 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st for (i = 0; i < ARRAY_SIZE(aca_regs); i++) RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); + + if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS])) + RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n"); } static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type, -- 2.51.0 From 9427ff3b4ab6d623940aca146f3e7606824ee75b Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Fri, 18 Apr 2025 01:21:14 +0100 Subject: [PATCH 10/16] drm/radeon/radeon_audio: Remove unused r600_hdmi_audio_workaround The last use of r600_hdmi_audio_workaround() was removed by 2014's commit 6e72376dcc66 ("radeon/audio: consolidate audio_mode_set() functions") Remove it. Signed-off-by: Dr. 
David Alan Gilbert Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r600_hdmi.c | 22 ---------------------- drivers/gpu/drm/radeon/radeon_asic.h | 1 - 2 files changed, 23 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 661f374f5f27..9758f3a9df75 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -290,28 +290,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder) return result; } -/* - * write the audio workaround status to the hardware - */ -void r600_hdmi_audio_workaround(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - uint32_t offset = dig->afmt->offset; - bool hdmi_audio_workaround = false; /* FIXME */ - u32 value; - - if (!hdmi_audio_workaround || - r600_hdmi_is_audio_buffer_filled(encoder)) - value = 0; /* disable workaround */ - else - value = HDMI0_AUDIO_TEST_EN; /* enable workaround */ - WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, - value, ~HDMI0_AUDIO_TEST_EN); -} - void r600_hdmi_audio_set_dto(struct radeon_device *rdev, struct radeon_crtc *crtc, unsigned int clock) { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 8f5e07834fcc..9e697f10f9ca 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -401,7 +401,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, size_t size); void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock); -void r600_hdmi_audio_workaround(struct drm_encoder *encoder); int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); u32 r600_get_xclk(struct radeon_device *rdev); -- 2.51.0 From fbe6fa7228c615276d0cb611afefa94c5c873c5e Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Fri, 18 Apr 2025 01:21:16 +0100 Subject: [PATCH 11/16] drm/radeon: Remove unused radeon_fence_wait_any radeon_fence_wait_any() last use was removed in 2023's commit 254986e324ad ("drm/radeon: Use the drm suballocation manager implementation.") Remove it. Signed-off-by: Dr. 
David Alan Gilbert Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 3 -- drivers/gpu/drm/radeon/radeon_fence.c | 42 --------------------------- 2 files changed, 45 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8605c074d9f7..63c47585afbc 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -394,9 +394,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, l int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); int radeon_fence_wait_next(struct radeon_device *rdev, int ring); int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); -int radeon_fence_wait_any(struct radeon_device *rdev, - struct radeon_fence **fences, - bool intr); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 8ff4f18b51a9..5b5b54e876d4 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -574,48 +574,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) return r; } -/** - * radeon_fence_wait_any - wait for a fence to signal on any ring - * - * @rdev: radeon device pointer - * @fences: radeon fence object(s) - * @intr: use interruptable sleep - * - * Wait for any requested fence to signal (all asics). Fence - * array is indexed by ring id. @intr selects whether to use - * interruptable (true) or non-interruptable (false) sleep when - * waiting for the fences. Used by the suballocator. - * Returns 0 if any fence has passed, error for all other cases. - */ -int radeon_fence_wait_any(struct radeon_device *rdev, - struct radeon_fence **fences, - bool intr) -{ - uint64_t seq[RADEON_NUM_RINGS]; - unsigned int i, num_rings = 0; - long r; - - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - seq[i] = 0; - - if (!fences[i]) - continue; - - seq[i] = fences[i]->seq; - ++num_rings; - } - - /* nothing to wait for ? */ - if (num_rings == 0) - return -ENOENT; - - r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) - return r; - - return 0; -} - /** * radeon_fence_wait_next - wait for the next fence to signal * -- 2.51.0 From d61724056a74b6316f92433bd89f08421a36d2d9 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Fri, 18 Apr 2025 01:21:17 +0100 Subject: [PATCH 12/16] drm/amd/display: Remove unused *vbios_smu_set_dprefclk rn_vbios_smu_set_dprefclk() was added in 2019 by commit 4edb6fc91878 ("drm/amd/display: Add Renoir clock manager"). rv1_vbios_smu_set_dprefclk() was also added in 2019 by commit dc88b4a684d2 ("drm/amd/display: make clk mgr soc specific"). Neither has been used. Remove them. Signed-off-by: Dr. 
David Alan Gilbert Signed-off-by: Alex Deucher --- .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 14 -------------- .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h | 1 - .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 14 -------------- .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h | 1 - 4 files changed, 30 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 19897fa52e7e..d82a52319088 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di return actual_dispclk_set_mhz * 1000; } - -int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) -{ - int actual_dprefclk_set_mhz = -1; - - actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDprefclkFreq, - khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); - - /* TODO: add code for programing DP DTO, currently this is down by command table */ - - return actual_dprefclk_set_mhz * 1000; -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h index 083cb3158859..81d7c912549c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h @@ -27,6 +27,5 @@ #define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); -int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 23b390245b5d..5a633333dbb5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis return actual_dispclk_set_mhz * 1000; } -int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) -{ - int actual_dprefclk_set_mhz = -1; - - actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDprefclkFreq, - khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); - - /* TODO: add code for programing DP DTO, currently this is down by command table */ - - return actual_dprefclk_set_mhz * 1000; -} - int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz) { int actual_dcfclk_set_mhz = -1; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index 1ce19d875358..f76fad87f0e1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -30,7 +30,6 @@ enum dcn_pwr_state; int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr); int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); -int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz); int 
rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); -- 2.51.0 From ba324ffb25999e14b8488d085ae9916d6f4a6bcf Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 Apr 2025 16:59:08 -0400 Subject: [PATCH 13/16] drm/amdgpu/userq: optimize enforce isolation and s/r If user queues are disabled for all IPs in the case of suspend and resume and for gfx/compute in the case of enforce isolation, we can return early. Reviewed-by: Prike Liang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 59488acd89fa..17bf2d568ae2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -759,12 +759,16 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) int amdgpu_userq_suspend(struct amdgpu_device *adev) { + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; int ret = 0; + if (!ip_mask) + return 0; + mutex_lock(&adev->userq_mutex); list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { cancel_delayed_work_sync(&uqm->resume_work); @@ -779,12 +783,16 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) int amdgpu_userq_resume(struct amdgpu_device *adev) { + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; int ret = 0; + if (!ip_mask) + return 0; + mutex_lock(&adev->userq_mutex); list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { @@ -799,12 +807,17 @@ int amdgpu_userq_resume(struct amdgpu_device *adev) int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx) { + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; int ret = 0; + /* only need to stop gfx/compute */ + if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) + return 0; + mutex_lock(&adev->userq_mutex); if (adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already stopped!\n"); @@ -827,12 +840,17 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx) { + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; int ret = 0; + /* only need to stop gfx/compute */ + if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) + return 0; + mutex_lock(&adev->userq_mutex); if (!adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already started!\n"); -- 2.51.0 From e67b95f0cd5e8dd57a9df5d5d15da6ba7847808a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 12 Apr 2025 12:59:38 -0400 Subject: [PATCH 14/16] drm/amdgpu: switch from queue_active to queue state Track the state of the queue 
rather than simple active vs not. This is needed for other states (hung, preempted, etc.). While we are at it, move the state tracking into the user queue front end code. Reviewed-by: Prike Liang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 101 ++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h | 9 +- drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 8 -- 3 files changed, 77 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 17bf2d568ae2..712fd34740a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -44,6 +44,45 @@ u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev) return userq_ip_mask; } +static int +amdgpu_userqueue_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { + r = userq_funcs->unmap(uq_mgr, queue); + if (r) + queue->state = AMDGPU_USERQ_STATE_HUNG; + else + queue->state = AMDGPU_USERQ_STATE_UNMAPPED; + } + return r; +} + +static int +amdgpu_userqueue_map_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) { + r = userq_funcs->map(uq_mgr, queue); + if (r) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + } else { + queue->state = AMDGPU_USERQ_STATE_MAPPED; + } + } + return r; +} + static void amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue, @@ -79,7 +118,7 @@ amdgpu_userqueue_active(struct amdgpu_userq_mgr *uq_mgr) mutex_lock(&uq_mgr->userq_mutex); /* Resume all the queues for this process */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) - ret += queue->queue_active; + ret += queue->state == AMDGPU_USERQ_STATE_MAPPED; mutex_unlock(&uq_mgr->userq_mutex); return ret; @@ -254,9 +293,8 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id) struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; struct amdgpu_device *adev = uq_mgr->adev; - const struct amdgpu_userq_funcs *uq_funcs; struct amdgpu_usermode_queue *queue; - int r; + int r = 0; cancel_delayed_work(&uq_mgr->resume_work); mutex_lock(&uq_mgr->userq_mutex); @@ -267,8 +305,7 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id) mutex_unlock(&uq_mgr->userq_mutex); return -EINVAL; } - uq_funcs = adev->userq_funcs[queue->queue_type]; - r = uq_funcs->unmap(uq_mgr, queue); + r = amdgpu_userqueue_unmap_helper(uq_mgr, queue); amdgpu_bo_unpin(queue->db_obj.obj); amdgpu_bo_unref(&queue->db_obj.obj); amdgpu_userqueue_cleanup(uq_mgr, queue, queue_id); @@ -414,7 +451,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args) else skip_map_queue = false; if (!skip_map_queue) { - r = uq_funcs->map(uq_mgr, queue); + r = amdgpu_userqueue_map_helper(uq_mgr, queue); if (r) { mutex_unlock(&adev->userq_mutex); DRM_ERROR("Failed to map Queue\n"); @@ -489,19 +526,19 @@ static int amdgpu_userqueue_resume_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_device *adev = uq_mgr->adev; - const struct amdgpu_userq_funcs *userq_funcs; 
struct amdgpu_usermode_queue *queue; int queue_id; - int ret = 0; + int ret = 0, r; /* Resume all the queues for this process */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { - userq_funcs = adev->userq_funcs[queue->queue_type]; - ret |= userq_funcs->map(uq_mgr, queue); + r = amdgpu_userqueue_map_helper(uq_mgr, queue); + if (r) + ret = r; } if (ret) - DRM_ERROR("Failed to map all the queues\n"); + dev_err(adev->dev, "Failed to map all the queues\n"); return ret; } @@ -647,19 +684,19 @@ static int amdgpu_userqueue_suspend_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_device *adev = uq_mgr->adev; - const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; int queue_id; - int ret = 0; + int ret = 0, r; /* Try to unmap all the queues in this process ctx */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { - userq_funcs = adev->userq_funcs[queue->queue_type]; - ret += userq_funcs->unmap(uq_mgr, queue); + r = amdgpu_userqueue_unmap_helper(uq_mgr, queue); + if (r) + ret = r; } if (ret) - DRM_ERROR("Couldn't unmap all the queues\n"); + dev_err(adev->dev, "Couldn't unmap all the queues\n"); return ret; } @@ -760,11 +797,10 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) int amdgpu_userq_suspend(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); - const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; - int ret = 0; + int ret = 0, r; if (!ip_mask) return 0; @@ -773,8 +809,9 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { cancel_delayed_work_sync(&uqm->resume_work); idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - userq_funcs = adev->userq_funcs[queue->queue_type]; - ret |= userq_funcs->unmap(uqm, queue); + r = amdgpu_userqueue_unmap_helper(uqm, queue); + if (r) + ret = r; } } mutex_unlock(&adev->userq_mutex); @@ -784,11 +821,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) int amdgpu_userq_resume(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); - const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; - int ret = 0; + int ret = 0, r; if (!ip_mask) return 0; @@ -796,8 +832,9 @@ int amdgpu_userq_resume(struct amdgpu_device *adev) mutex_lock(&adev->userq_mutex); list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - userq_funcs = adev->userq_funcs[queue->queue_type]; - ret |= userq_funcs->map(uqm, queue); + r = amdgpu_userqueue_map_helper(uqm, queue); + if (r) + ret = r; } } mutex_unlock(&adev->userq_mutex); @@ -808,11 +845,10 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx) { u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); - const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; - int ret = 0; + int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) @@ -828,8 +864,9 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { - userq_funcs = adev->userq_funcs[queue->queue_type]; - ret |= 
userq_funcs->unmap(uqm, queue); + r = amdgpu_userqueue_unmap_helper(uqm, queue); + if (r) + ret = r; } } } @@ -841,11 +878,10 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx) { u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev); - const struct amdgpu_userq_funcs *userq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_userq_mgr *uqm, *tmp; int queue_id; - int ret = 0; + int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) @@ -860,8 +896,9 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { - userq_funcs = adev->userq_funcs[queue->queue_type]; - ret |= userq_funcs->map(uqm, queue); + r = amdgpu_userqueue_map_helper(uqm, queue); + if (r) + ret = r; } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h index b49f147eb69c..8f392a0947a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h @@ -32,6 +32,13 @@ #define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr) #define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name) +enum amdgpu_userqueue_state { + AMDGPU_USERQ_STATE_UNMAPPED = 0, + AMDGPU_USERQ_STATE_MAPPED, + AMDGPU_USERQ_STATE_PREEMPTED, + AMDGPU_USERQ_STATE_HUNG, +}; + struct amdgpu_mqd_prop; struct amdgpu_userq_obj { @@ -42,7 +49,7 @@ struct amdgpu_userq_obj { struct amdgpu_usermode_queue { int queue_type; - uint8_t queue_active; + enum amdgpu_userqueue_state state; uint64_t doorbell_handle; uint64_t doorbell_index; uint64_t flags; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index b3157df8ae10..4c01c3a03095 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -120,9 +120,6 @@ static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr, struct mes_add_queue_input queue_input; int r; - if (queue->queue_active) - return 0; - memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); queue_input.process_va_start = 0; @@ -155,7 +152,6 @@ static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr, return r; } - queue->queue_active = true; DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index); return 0; } @@ -168,9 +164,6 @@ static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_userq_obj *ctx = &queue->fw_obj; int r; - if (!queue->queue_active) - return 0; - memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input)); queue_input.doorbell_offset = queue->doorbell_index; queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; @@ -180,7 +173,6 @@ static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr, amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r); - queue->queue_active = false; return r; } -- 2.51.0 From 36b0bc1731c82fdf2b9228fe4e86d99e5063be1b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 Apr 2025 14:35:05 -0400 Subject: [PATCH 15/16] drm/amdgpu/userq: unmap queues in amdgpu_userq_mgr_fini() This was missed when the map and unmap were split out of the mqd create and destroy functions. 
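The unmap helper relied on here (introduced by the previous patch in the series) reduces queue teardown to a small state machine; a condensed sketch of the transition, with state names taken from that patch:

    /* Only a MAPPED queue is unmapped; a failed unmap parks the queue in
     * HUNG so the map helper will never try to map it again. */
    if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
            r = userq_funcs->unmap(uq_mgr, queue);
            queue->state = r ? AMDGPU_USERQ_STATE_HUNG
                             : AMDGPU_USERQ_STATE_UNMAPPED;
    }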
Fixes: b0db33c8c50f ("drm/amdgpu/userq: rework front end call sequence") Reviewed-by: Prike Liang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index 712fd34740a0..d821e5d57417 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -779,8 +779,10 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) cancel_delayed_work(&userq_mgr->resume_work); mutex_lock(&userq_mgr->userq_mutex); - idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) + idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { + amdgpu_userqueue_unmap_helper(userq_mgr, queue); amdgpu_userqueue_cleanup(userq_mgr, queue, queue_id); + } mutex_lock(&adev->userq_mutex); list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { if (uqm == userq_mgr) { -- 2.51.0 From d13e95967ebfde85d244ea626c8b14a12bca14ac Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 Apr 2025 14:40:25 -0400 Subject: [PATCH 16/16] drm/amdgpu/userq: move waiting for last fence before unmap Need to wait for the last fence before unmapping. This also fixes a memory leak in amdgpu_userqueue_cleanup() when the fence isn't signalled. Fixes: b0db33c8c50f ("drm/amdgpu/userq: rework front end call sequence") Reviewed-by: Prike Liang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 24 ++++++++++++------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c index d821e5d57417..b75b93e69c09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c @@ -84,23 +84,27 @@ amdgpu_userqueue_map_helper(struct amdgpu_userq_mgr *uq_mgr, } static void -amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, - struct amdgpu_usermode_queue *queue, - int queue_id) +amdgpu_userqueue_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) { struct amdgpu_device *adev = uq_mgr->adev; - const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; struct dma_fence *f = queue->last_fence; int ret; if (f && !dma_fence_is_signaled(f)) { ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); - if (ret <= 0) { - DRM_ERROR("Timed out waiting for fence=%llu:%llu\n", - f->context, f->seqno); - return; - } + if (ret <= 0) + dev_err(adev->dev, "Timed out waiting for fence f=%p\n", f); } +} + +static void +amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue, + int queue_id) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; uq_funcs->mqd_destroy(uq_mgr, queue); amdgpu_userq_fence_driver_free(queue); @@ -305,6 +309,7 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id) mutex_unlock(&uq_mgr->userq_mutex); return -EINVAL; } + amdgpu_userqueue_wait_for_last_fence(uq_mgr, queue); r = amdgpu_userqueue_unmap_helper(uq_mgr, queue); amdgpu_bo_unpin(queue->db_obj.obj); amdgpu_bo_unref(&queue->db_obj.obj); @@ -780,6 +785,7 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) mutex_lock(&userq_mgr->userq_mutex); idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { + amdgpu_userqueue_wait_for_last_fence(userq_mgr, queue); 
amdgpu_userqueue_unmap_helper(userq_mgr, queue); amdgpu_userqueue_cleanup(userq_mgr, queue, queue_id); } -- 2.51.0
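A closing note on the fence wait introduced above: dma_fence_wait_timeout() returns the remaining timeout (positive) when the fence signals, 0 on timeout, and a negative errno if interrupted, so the ret <= 0 check covers both failure cases. A minimal sketch of the pattern, assuming a 100 ms bound is acceptable for queue teardown:

    struct dma_fence *f = queue->last_fence;

    if (f && !dma_fence_is_signaled(f)) {
            /* interruptible wait, bounded to 100 ms */
            long ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));

            if (ret <= 0) /* 0 == timed out, < 0 == interrupted */
                    dev_err(adev->dev, "Timed out waiting for fence f=%p\n", f);
    }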