From 2c01befe4a2707302eb1c97b955d94d66fac7b6f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 4 Mar 2025 17:57:43 -0500 Subject: [PATCH 01/16] drm/amdgpu/vcn: fix idle work handler for VCN 2.5 VCN 2.5 uses the PG callback to enable VCN DPM which is a global state. As such, we need to make sure all instances are in the same state. v2: switch to a ref count (Lijo) v3: switch to its own idle work handler v4: fix logic in DPG handling Fixes: 4ce4fe27205c ("drm/amdgpu/vcn: use per instance callbacks for idle work handler") Reviewed-by: Boyuan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 120 +++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index dff1a8859036..ff03436698a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -107,6 +107,115 @@ static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN1 }; +static void vcn_v2_5_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_vcn_inst *vcn_inst = + container_of(work, struct amdgpu_vcn_inst, idle_work.work); + struct amdgpu_device *adev = vcn_inst->adev; + unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; + unsigned int i, j; + int r = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + struct amdgpu_vcn_inst *v = &adev->vcn.inst[i]; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + + for (j = 0; j < v->num_enc_rings; ++j) + fence[i] += amdgpu_fence_count_emitted(&v->ring_enc[j]); + + /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */ + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && + !v->using_unified_queue) { + struct dpg_pause_state new_state; + + if (fence[i] || + unlikely(atomic_read(&v->dpg_enc_submission_cnt))) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + + v->pause_dpg_mode(v, &new_state); + } + + fence[i] += amdgpu_fence_count_emitted(&v->ring_dec); + fences += fence[i]; + + } + + if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) { + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); + r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, + false); + if (r) + dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r); + } else { + schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT); + } +} + +static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me]; + int r = 0; + + atomic_inc(&adev->vcn.inst[0].total_submission_cnt); + + if (!cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work)) { + r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, + true); + if (r) + dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r); + } + + mutex_lock(&adev->vcn.inst[0].vcn_pg_lock); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + + /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */ + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && + !v->using_unified_queue) { + struct dpg_pause_state new_state; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) { + atomic_inc(&v->dpg_enc_submission_cnt); + new_state.fw_based = VCN_DPG_STATE__PAUSE; + } else { + unsigned int fences = 0; + unsigned int i; + + for (i = 
0; i < v->num_enc_rings; ++i)
+				fences += amdgpu_fence_count_emitted(&v->ring_enc[i]);
+
+			if (fences || atomic_read(&v->dpg_enc_submission_cnt))
+				new_state.fw_based = VCN_DPG_STATE__PAUSE;
+			else
+				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+		}
+		v->pause_dpg_mode(v, &new_state);
+	}
+	mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+}
+
+static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+	    !adev->vcn.inst[ring->me].using_unified_queue)
+		atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+	atomic_dec(&adev->vcn.inst[0].total_submission_cnt);
+
+	schedule_delayed_work(&adev->vcn.inst[0].idle_work,
+			      VCN_IDLE_TIMEOUT);
+}
+
 /**
  * vcn_v2_5_early_init - set function pointers and load microcode
  *
@@ -201,6 +310,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 		if (r)
 			return r;
 
+		/* Override the work func */
+		adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;
+
 		amdgpu_vcn_setup_ucode(adev, j);
 
 		r = amdgpu_vcn_resume(adev, j);
@@ -1661,8 +1773,8 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
 	.insert_start = vcn_v2_0_dec_ring_insert_start,
 	.insert_end = vcn_v2_0_dec_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
+	.begin_use = vcn_v2_5_ring_begin_use,
+	.end_use = vcn_v2_5_ring_end_use,
 	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
 	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1759,8 +1871,8 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.insert_end = vcn_v2_0_enc_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
+	.begin_use = vcn_v2_5_ring_begin_use,
+	.end_use = vcn_v2_5_ring_end_use,
 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
--
2.51.0

From ed962f8d0603da15c26f1c9ce60cba42607a2768 Mon Sep 17 00:00:00 2001
From: Harish Kasiviswanathan
Date: Tue, 11 Feb 2025 12:56:01 -0500
Subject: [PATCH 02/16] drm/amdkfd: Add pm_config_dequeue_wait_counts API

Replace pm_update_grace_period() with the cleaner
pm_config_dequeue_wait_counts(). Previously, the grace_period variable
was overloaded as both a variable and a macro, making it inflexible to
configure additional dequeue wait times.

pm_config_dequeue_wait_counts() now takes a cmd and a value, which
makes it flexible enough to update different dequeue wait times.
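
As an illustration, the intended call pattern looks roughly like the
following sketch (based on the start_cpsch() and unmap_queues_cpsch()
hunks below; locking and error handling elided):

	/* startup: program the optimized per-ASIC defaults (value unused) */
	pm_config_dequeue_wait_counts(&dqm->packet_mgr,
				      KFD_DEQUEUE_WAIT_INIT, 0);

	/* debugger: temporarily shorten the context switch wait */
	pm_config_dequeue_wait_counts(&dqm->packet_mgr,
				      KFD_DEQUEUE_WAIT_SET_SCH_WAVE, grace_period);

	/* restore the cached default/optimized counts (value unused) */
	pm_config_dequeue_wait_counts(&dqm->packet_mgr,
				      KFD_DEQUEUE_WAIT_RESET, 0);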
Signed-off-by: Harish Kasiviswanathan
Reviewed-by: Jonathan Kim
Signed-off-by: Alex Deucher
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 45 ++++++--------
 .../drm/amd/amdkfd/kfd_device_queue_manager.h | 11 +++-
 .../gpu/drm/amd/amdkfd/kfd_packet_manager.c   | 26 +++++++-
 .../drm/amd/amdkfd/kfd_packet_manager_v9.c    | 59 ++++++++++++-----
 .../drm/amd/amdkfd/kfd_packet_manager_vi.c    |  4 +-
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         | 28 +++++++--
 6 files changed, 123 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2afcc1b4856a..885e0e9cf21b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -43,6 +43,8 @@
 /* Size of the per-pipe EOP queue */
 #define CIK_HPD_EOP_BYTES_LOG2 11
 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
+/* See unmap_queues_cpsch() */
+#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
 
 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
 				  u32 pasid, unsigned int vmid);
@@ -1746,10 +1748,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
 	init_sdma_bitmaps(dqm);
 
-	if (dqm->dev->kfd2kgd->get_iq_wait_times)
-		dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
-					&dqm->wait_times,
-					ffs(dqm->dev->xcc_mask) - 1);
+	update_dqm_wait_times(dqm);
 	return 0;
 }
 
@@ -1845,25 +1844,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	/* clear hang status when driver try to start the hw scheduler */
 	dqm->sched_running = true;
 
-	if (!dqm->dev->kfd->shared_resources.enable_mes)
+	if (!dqm->dev->kfd->shared_resources.enable_mes) {
+		if (pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+				KFD_DEQUEUE_WAIT_INIT, 0 /* unused */))
+			dev_err(dev, "Setting optimized dequeue wait failed. Using default values\n");
 		execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
-
-	/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
-	if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
-	    (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
-		uint32_t reg_offset = 0;
-		uint32_t grace_period = 1;
-
-		retval = pm_update_grace_period(&dqm->packet_mgr,
-						grace_period);
-		if (retval)
-			dev_err(dev, "Setting grace timeout failed\n");
-		else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
-			/* Update dqm->wait_times maintained in software */
-			dqm->dev->kfd2kgd->build_grace_period_packet_info(
-					dqm->dev->adev, dqm->wait_times,
-					grace_period, &reg_offset,
-					&dqm->wait_times);
 	}
 
 	/* setup per-queue reset detection buffer */
@@ -2359,7 +2344,14 @@ static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sd
 	return is_sdma ? reset_hung_queues_sdma(dqm) : reset_hung_queues(dqm);
 }
 
-/* dqm->lock mutex has to be locked before calling this function */
+/* dqm->lock mutex has to be locked before calling this function
+ *
+ * @grace_period: If USE_DEFAULT_GRACE_PERIOD, the default wait time for
+ * context switch latency is used. Lower values are used by the debugger
+ * since context switches are triggered at high frequency.
+ * This is configured by setting CP_IQ_WAIT_TIME2.SCH_WAVE
+ *
+ */
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 				enum kfd_unmap_queues_filter filter,
 				uint32_t filter_param,
@@ -2378,7 +2370,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 		return -EIO;
 
 	if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
-		retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
+		retval = pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+				KFD_DEQUEUE_WAIT_SET_SCH_WAVE, grace_period);
 		if (retval)
 			goto out;
 	}
@@ -2419,8 +2412,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 
 	/* We need to reset the grace period value for this device */
 	if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
-		if (pm_update_grace_period(&dqm->packet_mgr,
-					USE_DEFAULT_GRACE_PERIOD))
+		if (pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+				KFD_DEQUEUE_WAIT_RESET, 0 /* unused */))
 			dev_err(dev, "Failed to reset grace period\n");
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 122eb745e9c4..74a61b5b2f0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -37,7 +37,6 @@
 
 #define KFD_MES_PROCESS_QUANTUM		100000
 #define KFD_MES_GANG_QUANTUM		10000
-#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
 
 struct device_process_node {
 	struct qcm_process_device *qpd;
@@ -360,4 +359,14 @@ static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val
 	/* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
 	return get_user(*val, q_rptr + 1);
 }
+
+static inline void update_dqm_wait_times(struct device_queue_manager *dqm)
+{
+	if (dqm->dev->kfd2kgd->get_iq_wait_times)
+		dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
+						     &dqm->wait_times,
+						     ffs(dqm->dev->xcc_mask) - 1);
+}
+
+
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 4984b41cd372..3f574d82b5fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -396,14 +396,29 @@ out:
 	return retval;
 }
 
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
+/* pm_config_dequeue_wait_counts: Configure dequeue timer Wait Counts
+ * by writing to CP_IQ_WAIT_TIME2 registers.
+ *
+ * @cmd: See enum kfd_config_dequeue_wait_counts_cmd definition
+ * @value: Depends on the cmd. This parameter is unused for
+ *	KFD_DEQUEUE_WAIT_INIT and KFD_DEQUEUE_WAIT_RESET. For
+ *	KFD_DEQUEUE_WAIT_SET_SCH_WAVE it holds the value to be set
+ *
+ */
+int pm_config_dequeue_wait_counts(struct packet_manager *pm,
+		enum kfd_config_dequeue_wait_counts_cmd cmd,
+		uint32_t value)
 {
 	struct kfd_node *node = pm->dqm->dev;
 	struct device *dev = node->adev->dev;
 	int retval = 0;
 	uint32_t *buffer, size;
 
-	size = pm->pmf->set_grace_period_size;
+	if (!pm->pmf->config_dequeue_wait_counts ||
+	    !pm->pmf->config_dequeue_wait_counts_size)
+		return 0;
+
+	size = pm->pmf->config_dequeue_wait_counts_size;
 
 	mutex_lock(&pm->lock);
 
@@ -419,13 +434,18 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
 			goto out;
 		}
 
-		retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
+		retval = pm->pmf->config_dequeue_wait_counts(pm, buffer,
+							     cmd, value);
 		if (!retval)
 			retval = kq_submit_packet(pm->priv_queue);
 		else
 			kq_rollback_packet(pm->priv_queue);
 	}
 
+	/* If default value is modified, cache that value in dqm->wait_times */
+	if (!retval && cmd == KFD_DEQUEUE_WAIT_INIT)
+		update_dqm_wait_times(pm->dqm);
+
 out:
 	mutex_unlock(&pm->lock);
 	return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index d56525201155..b9c611b249e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -297,23 +297,54 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 	return 0;
 }
 
-static int pm_set_grace_period_v9(struct packet_manager *pm,
+static inline void pm_build_dequeue_wait_counts_packet_info(struct packet_manager *pm,
+		uint32_t sch_value, uint32_t *reg_offset,
+		uint32_t *reg_data)
+{
+	pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
+			pm->dqm->dev->adev,
+			pm->dqm->wait_times,
+			sch_value,
+			reg_offset,
+			reg_data);
+}
+
+static int pm_config_dequeue_wait_counts_v9(struct packet_manager *pm,
 		uint32_t *buffer,
-		uint32_t grace_period)
+		enum kfd_config_dequeue_wait_counts_cmd cmd,
+		uint32_t value)
 {
 	struct pm4_mec_write_data_mmio *packet;
 	uint32_t reg_offset = 0;
 	uint32_t reg_data = 0;
 
-	pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
-			pm->dqm->dev->adev,
-			pm->dqm->wait_times,
-			grace_period,
-			&reg_offset,
-			&reg_data);
-
-	if (grace_period == USE_DEFAULT_GRACE_PERIOD)
+	switch (cmd) {
+	case KFD_DEQUEUE_WAIT_INIT:
+		/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
+		if (amdgpu_emu_mode == 0 && pm->dqm->dev->adev->gmc.is_app_apu &&
+		    (KFD_GC_VERSION(pm->dqm->dev) == IP_VERSION(9, 4, 3)))
+			pm_build_dequeue_wait_counts_packet_info(pm, 1, &reg_offset, &reg_data);
+		else
+			return 0;
+		break;
+	case KFD_DEQUEUE_WAIT_RESET:
+		/* function called only to get reg_offset */
+		pm_build_dequeue_wait_counts_packet_info(pm, 0, &reg_offset, &reg_data);
 		reg_data = pm->dqm->wait_times;
+		break;
+
+	case KFD_DEQUEUE_WAIT_SET_SCH_WAVE:
+		/* The CP cannot handle value 0 and it will result in
+		 * an infinite grace period being set, so set it to 1 to prevent this.
+ */ + if (!value) + value = 1; + pm_build_dequeue_wait_counts_packet_info(pm, value, ®_offset, ®_data); + break; + default: + pr_err("Invalid dequeue wait cmd\n"); + return -EINVAL; + } packet = (struct pm4_mec_write_data_mmio *)buffer; memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio)); @@ -415,7 +446,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, - .set_grace_period = pm_set_grace_period_v9, + .config_dequeue_wait_counts = pm_config_dequeue_wait_counts_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -423,7 +454,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), + .config_dequeue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; @@ -434,7 +465,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, - .set_grace_period = pm_set_grace_period_v9, + .config_dequeue_wait_counts = pm_config_dequeue_wait_counts_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process_aldebaran), @@ -442,7 +473,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), + .config_dequeue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 347c86e1c378..a1de5d7e173a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -304,7 +304,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources = pm_set_resources_vi, .map_queues = pm_map_queues_vi, .unmap_queues = pm_unmap_queues_vi, - .set_grace_period = NULL, + .config_dequeue_wait_counts = NULL, .query_status = pm_query_status_vi, .release_mem = pm_release_mem_vi, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -312,7 +312,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .set_grace_period_size = 0, + .config_dequeue_wait_counts_size = 0, .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = sizeof(struct pm4_mec_release_mem) }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index bb09c873a9a5..f6aedf69c644 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1389,6 +1389,24 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm, #define KFD_FENCE_COMPLETED (100) #define KFD_FENCE_INIT (10) +/** + * enum 
kfd_config_dequeue_wait_counts_cmd - Command for configuring
+ * dequeue wait counts.
+ *
+ * @KFD_DEQUEUE_WAIT_INIT: Set optimized dequeue wait counts for
+ * certain ASICs. For these ASICs, this is the default value used by RESET
+ * @KFD_DEQUEUE_WAIT_RESET: Reset dequeue wait counts to the optimized value
+ * for certain ASICs. For others, set it to the default hardware reset value
+ * @KFD_DEQUEUE_WAIT_SET_SCH_WAVE: Set the context switch latency wait count
+ *
+ */
+enum kfd_config_dequeue_wait_counts_cmd {
+	KFD_DEQUEUE_WAIT_INIT = 1,
+	KFD_DEQUEUE_WAIT_RESET = 2,
+	KFD_DEQUEUE_WAIT_SET_SCH_WAVE = 3
+};
+
+
 struct packet_manager {
 	struct device_queue_manager *dqm;
 	struct kernel_queue *priv_queue;
@@ -1414,8 +1432,8 @@ struct packet_manager_funcs {
 	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
 			enum kfd_unmap_queues_filter mode,
 			uint32_t filter_param, bool reset);
-	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
-			uint32_t grace_period);
+	int (*config_dequeue_wait_counts)(struct packet_manager *pm, uint32_t *buffer,
+			enum kfd_config_dequeue_wait_counts_cmd cmd, uint32_t value);
 	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
 			uint64_t fence_address, uint64_t fence_value);
 	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
@@ -1426,7 +1444,7 @@ struct packet_manager_funcs {
 	int set_resources_size;
 	int map_queues_size;
 	int unmap_queues_size;
-	int set_grace_period_size;
+	int config_dequeue_wait_counts_size;
 	int query_status_size;
 	int release_mem_size;
 };
@@ -1449,7 +1467,9 @@ int pm_send_unmap_queue(struct packet_manager *pm,
 
 void pm_release_ib(struct packet_manager *pm);
 
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+int pm_config_dequeue_wait_counts(struct packet_manager *pm,
+		enum kfd_config_dequeue_wait_counts_cmd cmd,
+		uint32_t wait_counts_config);
 
 /* Following PM funcs can be shared among VI and AI */
 unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
--
2.51.0

From 0d9cabc8f591ea1cd97c071b853b75b155c13259 Mon Sep 17 00:00:00 2001
From: Leon Huang
Date: Tue, 11 Feb 2025 15:45:43 +0800
Subject: [PATCH 03/16] drm/amd/display: Fix incorrect DPCD configs while Replay/PSR switch

[Why]
When switching between PSR/Replay, the DPCD config of the previous mode
is not cleared, resulting in unexpected behavior in the TCON.
[How] Initialize the DPCD in setup function Reviewed-by: Robin Chen Signed-off-by: Leon Huang Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../link/protocols/link_edp_panel_control.c | 25 ++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index e0e3bb865359..1e4adbc764ea 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -675,6 +675,18 @@ bool edp_setup_psr(struct dc_link *link, if (!link) return false; + //Clear PSR cfg + memset(&psr_configuration, 0, sizeof(psr_configuration)); + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_PSR_EN_CFG, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) + return false; + dc = link->ctx->dc; dmcu = dc->res_pool->dmcu; psr = dc->res_pool->psr; @@ -685,9 +697,6 @@ bool edp_setup_psr(struct dc_link *link, if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; - - memset(&psr_configuration, 0, sizeof(psr_configuration)); - psr_configuration.bits.ENABLE = 1; psr_configuration.bits.CRC_VERIFICATION = 1; psr_configuration.bits.FRAME_CAPTURE_INDICATION = @@ -950,6 +959,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream if (!link) return false; + //Clear Replay config + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_SINK_PR_ENABLE_AND_CONFIGURATION, + (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); + + if (!(link->replay_settings.config.replay_supported)) + return false; + + link->replay_settings.config.replay_error_status.raw = 0; + dc = link->ctx->dc; replay = dc->res_pool->replay; -- 2.51.0 From 7b1ba19eb15f88e70782642ce2d934211269337b Mon Sep 17 00:00:00 2001 From: Leo Li Date: Thu, 20 Feb 2025 16:20:26 -0500 Subject: [PATCH 04/16] drm/amd/display: Disable unneeded hpd interrupts during dm_init [Why] It seems HPD interrupts are enabled by default for all connectors, even if the hpd source isn't valid. An eDP for example, does not have a valid hpd source (but does have a valid hpdrx source; see construct_phy()). Thus, eDPs should have their hpd interrupt disabled. In the past, this wasn't really an issue. Although the driver gets interrupted, then acks by writing to hw registers, there weren't any subscribed handlers that did anything meaningful (see register_hpd_handlers()). But things changed with the introduction of IPS. s2idle requires that the driver allows IPS for DMUB fw to put hw to sleep. Since register access requires hw to be awake, the driver will block IPS entry to do so. And no IPS means no hw sleep during s2idle. This was the observation on DCN35 systems with an eDP. During suspend, the eDP toggled its hpd pin as part of the panel power down sequence. The driver was then interrupted, and acked by writing to registers, blocking IPS entry. [How] Since DC marks eDP connections as having invalid hpd sources (see construct_phy()), DM should disable them at the hw level. Do so in amdgpu_dm_hpd_init() by disabling all hpd ints first, then selectively enabling ones for connectors that have valid hpd sources. 
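
The resulting flow is sketched below (a simplification of the
amdgpu_dm_hpd_init() hunk in this patch; iterator setup, the
mode_info.num_hpd fallback and error handling are elided, and the
connector-to-link lookup via the existing to_amdgpu_dm_connector()
helper stands in for the full amdgpu_dm_connector handling):

	/* first, disable every hpd and hpdrx interrupt source at the hw level */
	for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++)
		dc_interrupt_set(adev->dm.dc, i, false);

	/* then re-enable only links that report a valid hpd source */
	drm_for_each_connector_iter(connector, &iter) {
		dc_link = to_amdgpu_dm_connector(connector)->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID)
			amdgpu_irq_get(adev, &adev->hpd_irq,
				       dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1);
	}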
Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Leo Li Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 64 +++++++++++++------ 1 file changed, 45 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 2b63cbab0e87..b61e210f6246 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -890,8 +890,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; int i; + /* First, clear all hpd and hpdrx interrupts */ + for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { + if (!dc_interrupt_set(adev->dm.dc, i, false)) + drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n", + i); + } + drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector; @@ -904,10 +912,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) dc_link = amdgpu_dm_connector->dc_link; + /* + * Get a base driver irq reference for hpd ints for the lifetime + * of dm. Note that only hpd interrupt types are registered with + * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on + * hpd_rx isn't available. DM currently controls hpd_rx + * explicitly with dc_interrupt_set() + */ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - true); + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + /* + * TODO: There's a mismatch between mode_info.num_hpd + * and what bios reports as the # of connectors with hpd + * sources. Since the # of hpd source types registered + * with base driver == mode_info.num_hpd, we have to + * fallback to dc_interrupt_set for the remaining types. 
+ */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + true); + } } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { @@ -917,12 +946,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); - - /* Update reference counts for HPDs */ - for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) { - if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1)) - drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i); - } } /** @@ -938,7 +961,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; - int i; + int irq_type; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -952,9 +975,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - false); + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + + /* TODO: See same TODO in amdgpu_dm_hpd_init() */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + false); + } } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { @@ -964,10 +996,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); - - /* Update reference counts for HPDs */ - for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) { - if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1)) - drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i); - } } -- 2.51.0 From d93b92c976671bf3352ad808c3783d37f62e9a0a Mon Sep 17 00:00:00 2001 From: Joshua Aberback Date: Tue, 25 Feb 2025 16:17:25 -0500 Subject: [PATCH 05/16] drm/amd/display: Add more debug data to dmub_srv [Why] When analyzing some crash dumps, not all of the expected DMUB info was available, so we want to add in-object storage for this data. 
[How] - dmub_srv_debug (renamed to dmub_timeout_info) is already a member of dmub_diagnostic_data, therefore keep a dmub_diagnostic_data directly in dmub_srv - use dmub_srv->debug when collecting diagnostic info instead of stack object to allow for easy inspection in crash dumps Reviewed-by: Alvin Lee Signed-off-by: Joshua Aberback Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 89 +++++++++-------- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2 +- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 10 +- .../gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 85 ++++++++-------- .../gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2 +- .../gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 93 +++++++++--------- .../gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 2 +- .../gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 98 ++++++++++--------- .../gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 2 +- .../gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 93 +++++++++--------- .../gpu/drm/amd/display/dmub/src/dmub_dcn35.h | 2 +- .../drm/amd/display/dmub/src/dmub_dcn401.c | 97 +++++++++--------- .../drm/amd/display/dmub/src/dmub_dcn401.h | 2 +- .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 6 +- 14 files changed, 298 insertions(+), 285 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 87b4c2793df3..614e03bfd598 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -200,10 +200,10 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); - if (!dmub->debug.timeout_occured) { - dmub->debug.timeout_occured = true; - dmub->debug.timeout_cmd = *cmd_list; - dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); + if (!dmub->debug.timeout_info.timeout_occured) { + dmub->debug.timeout_info.timeout_occured = true; + dmub->debug.timeout_info.timeout_cmd = *cmd_list; + dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); } dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; @@ -927,16 +927,15 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } -bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) +bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) { - if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data) + if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; - return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data); + return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub); } void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) { - struct dmub_diagnostic_data diag_data = {0}; uint32_t i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) { @@ -946,49 +945,49 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__); - if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { + if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) { DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); return; } DC_LOG_DEBUG("DMCUB STATE:"); - DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version); - DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]); - DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]); - DC_LOG_DEBUG(" scratch 
[2] : %08x", diag_data.scratch[2]); - DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]); - DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]); - DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]); - DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]); - DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]); - DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]); - DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]); - DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]); - DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]); - DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]); - DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); - DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); - DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); + DC_LOG_DEBUG(" dmcub_version : %08x", dc_dmub_srv->dmub->debug.dmcub_version); + DC_LOG_DEBUG(" scratch [0] : %08x", dc_dmub_srv->dmub->debug.scratch[0]); + DC_LOG_DEBUG(" scratch [1] : %08x", dc_dmub_srv->dmub->debug.scratch[1]); + DC_LOG_DEBUG(" scratch [2] : %08x", dc_dmub_srv->dmub->debug.scratch[2]); + DC_LOG_DEBUG(" scratch [3] : %08x", dc_dmub_srv->dmub->debug.scratch[3]); + DC_LOG_DEBUG(" scratch [4] : %08x", dc_dmub_srv->dmub->debug.scratch[4]); + DC_LOG_DEBUG(" scratch [5] : %08x", dc_dmub_srv->dmub->debug.scratch[5]); + DC_LOG_DEBUG(" scratch [6] : %08x", dc_dmub_srv->dmub->debug.scratch[6]); + DC_LOG_DEBUG(" scratch [7] : %08x", dc_dmub_srv->dmub->debug.scratch[7]); + DC_LOG_DEBUG(" scratch [8] : %08x", dc_dmub_srv->dmub->debug.scratch[8]); + DC_LOG_DEBUG(" scratch [9] : %08x", dc_dmub_srv->dmub->debug.scratch[9]); + DC_LOG_DEBUG(" scratch [10] : %08x", dc_dmub_srv->dmub->debug.scratch[10]); + DC_LOG_DEBUG(" scratch [11] : %08x", dc_dmub_srv->dmub->debug.scratch[11]); + DC_LOG_DEBUG(" scratch [12] : %08x", dc_dmub_srv->dmub->debug.scratch[12]); + DC_LOG_DEBUG(" scratch [13] : %08x", dc_dmub_srv->dmub->debug.scratch[13]); + DC_LOG_DEBUG(" scratch [14] : %08x", dc_dmub_srv->dmub->debug.scratch[14]); + DC_LOG_DEBUG(" scratch [15] : %08x", dc_dmub_srv->dmub->debug.scratch[15]); for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++) - DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]); - DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); - DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); - DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); - DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr); - DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr); - DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size); - DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); - DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); - DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); - DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr); - DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr); - DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size); - DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); - DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); - DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); - DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en); - DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled); - DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled); + DC_LOG_DEBUG(" pc[%d] : %08x", i, dc_dmub_srv->dmub->debug.pc[i]); + DC_LOG_DEBUG(" unk_fault_addr : %08x", 
dc_dmub_srv->dmub->debug.undefined_address_fault_addr); + DC_LOG_DEBUG(" inst_fault_addr : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr); + DC_LOG_DEBUG(" data_fault_addr : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr); + DC_LOG_DEBUG(" inbox1_rptr : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr); + DC_LOG_DEBUG(" inbox1_wptr : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr); + DC_LOG_DEBUG(" inbox1_size : %08x", dc_dmub_srv->dmub->debug.inbox1_size); + DC_LOG_DEBUG(" inbox0_rptr : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr); + DC_LOG_DEBUG(" inbox0_wptr : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr); + DC_LOG_DEBUG(" inbox0_size : %08x", dc_dmub_srv->dmub->debug.inbox0_size); + DC_LOG_DEBUG(" outbox1_rptr : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr); + DC_LOG_DEBUG(" outbox1_wptr : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr); + DC_LOG_DEBUG(" outbox1_size : %08x", dc_dmub_srv->dmub->debug.outbox1_size); + DC_LOG_DEBUG(" is_enabled : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled); + DC_LOG_DEBUG(" is_soft_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset); + DC_LOG_DEBUG(" is_secure_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset); + DC_LOG_DEBUG(" is_traceport_en : %d", dc_dmub_srv->dmub->debug.is_traceport_en); + DC_LOG_DEBUG(" is_cw0_en : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled); + DC_LOG_DEBUG(" is_cw6_en : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled); } static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 10b48198b7a6..a636f4c3f01d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -94,7 +94,7 @@ void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data); -bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); +bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable); void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 0787cc3904d6..8ebeb34021e5 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -313,7 +313,7 @@ struct dmub_srv_hw_params { * @timeout_occured: Indicates a timeout occured on any message from driver to dmub * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored */ -struct dmub_srv_debug { +struct dmub_timeout_info { bool timeout_occured; union dmub_rb_cmd timeout_cmd; unsigned long long timestamp; @@ -340,7 +340,7 @@ struct dmub_diagnostic_data { uint32_t outbox1_wptr; uint32_t outbox1_size; uint32_t gpint_datain0; - struct dmub_srv_debug timeout_info; + struct dmub_timeout_info timeout_info; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; @@ -456,7 +456,7 @@ struct dmub_srv_hw_funcs { void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data); uint32_t (*get_current_time)(struct dmub_srv *dmub); - void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca); + void 
(*get_diagnostic_data)(struct dmub_srv *dmub); bool (*should_detect)(struct dmub_srv *dmub); void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx); @@ -545,7 +545,7 @@ struct dmub_srv { struct dmub_visual_confirm_color visual_confirm_color; enum dmub_srv_power_state_type power_state; - struct dmub_srv_debug debug; + struct dmub_diagnostic_data debug; }; /** @@ -901,7 +901,7 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry); -bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub); bool dmub_srv_should_detect(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index e500ca9ae09c..73888c1bea93 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -414,63 +414,66 @@ uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub) return REG_READ(DMCUB_TIMER_CURRENT); } -void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) +void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub) { uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; + struct dmub_timeout_info timeout = {0}; - if (!dmub || !diag_data) + if (!dmub) return; - memset(diag_data, 0, sizeof(*diag_data)); - - diag_data->dmcub_version = dmub->fw_version; - - diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); - diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); - diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); - diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); - diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); - diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); - diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); - diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); - diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); - diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); - diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); - diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); - diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); - diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); - diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); - diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); - - diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); - diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); - diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); - - diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); - diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); - diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); - - diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); - diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); - diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + /* timeout data filled externally, cache before resetting memory */ + timeout = dmub->debug.timeout_info; + memset(&dmub->debug, 0, sizeof(dmub->debug)); + dmub->debug.timeout_info = timeout; + + dmub->debug.dmcub_version = dmub->fw_version; + + dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0); + dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1); + dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2); + dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3); + dmub->debug.scratch[4] = 
REG_READ(DMCUB_SCRATCH4); + dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5); + dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6); + dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7); + dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8); + dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9); + dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10); + dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11); + dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12); + dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13); + dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14); + dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15); + + dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); + dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); + dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); + + dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); + dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); + dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); + + dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); + dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); + dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); - diag_data->is_dmcub_enabled = is_dmub_enabled; + dmub->debug.is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset); - diag_data->is_dmcub_soft_reset = is_soft_reset; + dmub->debug.is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; + dmub->debug.is_dmcub_secure_reset = is_sec_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); - diag_data->is_traceport_en = is_traceport_enabled; + dmub->debug.is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); - diag_data->is_cw0_enabled = is_cw0_enabled; + dmub->debug.is_cw0_enabled = is_cw0_enabled; REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); - diag_data->is_cw6_enabled = is_cw6_enabled; - diag_data->timeout_info = dmub->debug; + dmub->debug.is_cw6_enabled = is_cw6_enabled; } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index de287b101848..42c1fb4bc73f 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -247,6 +247,6 @@ bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub); uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub); -void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca); +void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub); #endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 9796077885c9..a308bd604677 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -414,69 +414,72 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub) return REG_READ(DMCUB_TIMER_CURRENT); } -void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) +void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) { uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; + struct dmub_timeout_info 
timeout = {0}; - if (!dmub || !diag_data) + if (!dmub) return; - memset(diag_data, 0, sizeof(*diag_data)); - - diag_data->dmcub_version = dmub->fw_version; - - diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); - diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); - diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); - diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); - diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); - diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); - diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); - diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); - diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); - diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); - diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); - diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); - diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); - diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); - diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); - diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); - - diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); - diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); - diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); - - diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); - diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); - diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); - - diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); - diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); - diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); - - diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); - diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); - diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + /* timeout data filled externally, cache before resetting memory */ + timeout = dmub->debug.timeout_info; + memset(&dmub->debug, 0, sizeof(dmub->debug)); + dmub->debug.timeout_info = timeout; + + dmub->debug.dmcub_version = dmub->fw_version; + + dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0); + dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1); + dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2); + dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3); + dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4); + dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5); + dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6); + dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7); + dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8); + dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9); + dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10); + dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11); + dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12); + dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13); + dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14); + dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15); + + dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); + dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); + dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); + + dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); + dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); + dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); + + dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); + dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); + dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + + dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + dmub->debug.outbox1_wptr = 
REG_READ(DMCUB_OUTBOX1_WPTR); + dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); - diag_data->is_dmcub_enabled = is_dmub_enabled; + dmub->debug.is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); - diag_data->is_dmcub_soft_reset = is_soft_reset; + dmub->debug.is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; + dmub->debug.is_dmcub_secure_reset = is_sec_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); - diag_data->is_traceport_en = is_traceport_enabled; + dmub->debug.is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); - diag_data->is_cw0_enabled = is_cw0_enabled; + dmub->debug.is_cw0_enabled = is_cw0_enabled; REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); - diag_data->is_cw6_enabled = is_cw6_enabled; - diag_data->timeout_info = dmub->debug; + dmub->debug.is_cw6_enabled = is_cw6_enabled; } bool dmub_dcn31_should_detect(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index eccdab4986ce..1c43ef2bca66 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -251,7 +251,7 @@ void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset); uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub); -void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub); bool dmub_dcn31_should_detect(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 9600b7f858b0..e7056205b050 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -417,73 +417,75 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub) return REG_READ(DMCUB_TIMER_CURRENT); } -void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) +void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub) { uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; + struct dmub_timeout_info timeout = {0}; - if (!dmub || !diag_data) + if (!dmub) return; - memset(diag_data, 0, sizeof(*diag_data)); - - diag_data->dmcub_version = dmub->fw_version; - - diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); - diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); - diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); - diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); - diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); - diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); - diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); - diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); - diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); - diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); - diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); - diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); - diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); - diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); - diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); - diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); - diag_data->scratch[16] = 
REG_READ(DMCUB_SCRATCH16); - - diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); - diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); - diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); - - diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); - diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); - diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); - - diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); - diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); - diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); - - diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); - diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); - diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + /* timeout data filled externally, cache before resetting memory */ + timeout = dmub->debug.timeout_info; + memset(&dmub->debug, 0, sizeof(dmub->debug)); + dmub->debug.timeout_info = timeout; + + dmub->debug.dmcub_version = dmub->fw_version; + + dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0); + dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1); + dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2); + dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3); + dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4); + dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5); + dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6); + dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7); + dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8); + dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9); + dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10); + dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11); + dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12); + dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13); + dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14); + dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15); + dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16); + + dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); + dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); + dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); + + dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); + dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); + dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); + + dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); + dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); + dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + + dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); - diag_data->is_dmcub_enabled = is_dmub_enabled; + dmub->debug.is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); - diag_data->is_dmcub_soft_reset = is_soft_reset; + dmub->debug.is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; + dmub->debug.is_dmcub_secure_reset = is_sec_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); - diag_data->is_traceport_en = is_traceport_enabled; + dmub->debug.is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); - diag_data->is_cw0_enabled = is_cw0_enabled; + dmub->debug.is_cw0_enabled = is_cw0_enabled; 
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); - diag_data->is_cw6_enabled = is_cw6_enabled; + dmub->debug.is_cw6_enabled = is_cw6_enabled; - diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); - - diag_data->timeout_info = dmub->debug; + dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index 29c1132951af..1a229450c53d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -254,7 +254,7 @@ void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset); uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub); -void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub); void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub); void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 01d013a12b94..72a0f078cd1a 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -462,66 +462,69 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) return REG_READ(DMCUB_TIMER_CURRENT); } -void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) +void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) { uint32_t is_dmub_enabled, is_soft_reset; uint32_t is_traceport_enabled, is_cw6_enabled; + struct dmub_timeout_info timeout = {0}; - if (!dmub || !diag_data) + if (!dmub) return; - memset(diag_data, 0, sizeof(*diag_data)); - - diag_data->dmcub_version = dmub->fw_version; - - diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); - diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); - diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); - diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); - diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); - diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); - diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); - diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); - diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); - diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); - diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); - diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); - diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); - diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); - diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); - diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); - diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16); - - diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); - diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); - diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); - - diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); - diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); - diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); - - diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); - diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); - diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); - - diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); - 
diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); - diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + /* timeout data filled externally, cache before resetting memory */ + timeout = dmub->debug.timeout_info; + memset(&dmub->debug, 0, sizeof(dmub->debug)); + dmub->debug.timeout_info = timeout; + + dmub->debug.dmcub_version = dmub->fw_version; + + dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0); + dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1); + dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2); + dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3); + dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4); + dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5); + dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6); + dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7); + dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8); + dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9); + dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10); + dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11); + dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12); + dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13); + dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14); + dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15); + dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16); + + dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); + dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); + dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); + + dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); + dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); + dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); + + dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); + dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); + dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + + dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); - diag_data->is_dmcub_enabled = is_dmub_enabled; + dmub->debug.is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); - diag_data->is_dmcub_soft_reset = is_soft_reset; + dmub->debug.is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); - diag_data->is_traceport_en = is_traceport_enabled; + dmub->debug.is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); - diag_data->is_cw6_enabled = is_cw6_enabled; + dmub->debug.is_cw6_enabled = is_cw6_enabled; - diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); - diag_data->timeout_info = dmub->debug; + dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h index 686e97c00ccc..39fcb7275da5 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h @@ -269,7 +269,7 @@ void dmub_dcn35_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset); uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub); -void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub); void 
dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index e1c4fe1c6e3e..e67f7c4784eb 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -415,72 +415,75 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub) return REG_READ(DMCUB_TIMER_CURRENT); } -void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) +void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) { uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; + struct dmub_timeout_info timeout = {0}; - if (!dmub || !diag_data) + if (!dmub) return; - memset(diag_data, 0, sizeof(*diag_data)); - - diag_data->dmcub_version = dmub->fw_version; - - diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); - diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); - diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); - diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); - diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); - diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); - diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); - diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); - diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); - diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); - diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); - diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); - diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); - diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); - diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); - diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); - diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16); - - diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); - diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); - diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); - - diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); - diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); - diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); - - diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); - diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); - diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); - - diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); - diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); - diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + /* timeout data filled externally, cache before resetting memory */ + timeout = dmub->debug.timeout_info; + memset(&dmub->debug, 0, sizeof(dmub->debug)); + dmub->debug.timeout_info = timeout; + + dmub->debug.dmcub_version = dmub->fw_version; + + dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0); + dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1); + dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2); + dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3); + dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4); + dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5); + dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6); + dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7); + dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8); + dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9); + dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10); + dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11); + dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12); + dmub->debug.scratch[13] = 
REG_READ(DMCUB_SCRATCH13); + dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14); + dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15); + dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16); + + dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); + dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); + dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); + + dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); + dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); + dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); + + dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); + dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); + dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + + dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); - diag_data->is_dmcub_enabled = is_dmub_enabled; + dmub->debug.is_dmcub_enabled = is_dmub_enabled; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); - diag_data->is_dmcub_soft_reset = is_soft_reset; + dmub->debug.is_dmcub_soft_reset = is_soft_reset; REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; + dmub->debug.is_dmcub_secure_reset = is_sec_reset; REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); - diag_data->is_traceport_en = is_traceport_enabled; + dmub->debug.is_traceport_en = is_traceport_enabled; REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); - diag_data->is_cw0_enabled = is_cw0_enabled; + dmub->debug.is_cw0_enabled = is_cw0_enabled; REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); - diag_data->is_cw6_enabled = is_cw6_enabled; + dmub->debug.is_cw6_enabled = is_cw6_enabled; - diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); - diag_data->timeout_info = dmub->debug; + dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h index 31f95b27e227..c35be52676f6 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h @@ -264,7 +264,7 @@ void dmub_dcn401_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset); uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub); -void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub); void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub); void dmub_dcn401_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 6133d25da301..0a2285540daf 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -1099,11 +1099,11 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry); } -bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) +bool dmub_srv_get_diagnostic_data(struct dmub_srv 
*dmub)
{
-	if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
+	if (!dmub || !dmub->hw_funcs.get_diagnostic_data)
 		return false;
 
-	dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
+	dmub->hw_funcs.get_diagnostic_data(dmub);
 
 	return true;
 }
-- 
2.51.0

From 0747acf3311229e22009bec4a9e7fc30c879e842 Mon Sep 17 00:00:00 2001
From: Mario Limonciello
Date: Sat, 22 Feb 2025 23:37:32 -0600
Subject: [PATCH 06/16] drm/amd/display: fix default brightness

[Why]
To avoid flickering during boot, the default brightness level set by the
BIOS should be maintained for as much of the boot as feasible.

commit 2fe87f54abdc ("drm/amd/display: Set default brightness according
to ACPI") attempted to set the right levels for AC vs DC, but brightness
still got reset to the maximum level in the initialization code for
setup_backlight_device().

[How]
Remove the hardcoded initialization in setup_backlight_device() and
instead program the brightness value to match the BIOS (AC or DC). This
avoids a brightness flicker from the kernel changing the value.
Userspace may however still change it during boot.

Fixes: 2fe87f54abdc ("drm/amd/display: Set default brightness according to ACPI")
Acked-by: Wayne Lin
Signed-off-by: Mario Limonciello
Signed-off-by: Tom Chung
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6a54f1cfa125..5ae521d1b74b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4986,6 +4986,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 	dm->backlight_dev[aconnector->bl_idx] =
 		backlight_device_register(bl_name, aconnector->base.kdev, dm,
 					  &amdgpu_dm_backlight_ops, &props);
+	dm->brightness[aconnector->bl_idx] = props.brightness;
 
 	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
 		DRM_ERROR("DM: Backlight registration failed!\n");
@@ -5053,7 +5054,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
 	aconnector->bl_idx = bl_idx;
 
 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
-	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
 	dm->backlight_link[bl_idx] = link;
 	dm->num_of_edps++;
-- 
2.51.0

From 5e19e2b57b6bb640d68dfc7991e1e182922cf867 Mon Sep 17 00:00:00 2001
From: Mario Limonciello
Date: Sun, 23 Feb 2025 00:04:35 -0600
Subject: [PATCH 07/16] drm/amd/display: Restore correct backlight brightness
 after a GPU reset

[Why]
GPU reset will attempt to restore cached state, but brightness doesn't
get restored. It will come back at 100% brightness, but userspace thinks
it's the previous value.

[How]
When running the resume sequence, if the GPU is in reset, restore the
brightness to its previous value.
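As a rough sketch of the flow described above (the standalone helper name
is hypothetical; the loop mirrors the hunk added to dm_resume() in the
diff below):

  /* Sketch: replay the cached per-eDP brightness once resume has brought
   * the links back up, so a GPU reset does not leave the panel at 100%
   * while userspace still reports the old value.
   */
  static void dm_restore_cached_brightness(struct amdgpu_display_manager *dm)
  {
  	int i;

  	for (i = 0; i < dm->num_of_edps; i++) {
  		if (dm->backlight_dev[i])
  			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
  	}
  }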
Acked-by: Wayne Lin
Signed-off-by: Mario Limonciello
Signed-off-by: Tom Chung
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5ae521d1b74b..bf69cd50712e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -250,6 +250,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
 static void handle_hpd_rx_irq(void *param);
 
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+					  int bl_idx,
+					  u32 user_brightness);
+
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 				 struct drm_crtc_state *new_crtc_state);
@@ -3432,6 +3436,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 
 	mutex_unlock(&dm->dc_lock);
 
+	/* set the backlight after a reset */
+	for (i = 0; i < dm->num_of_edps; i++) {
+		if (dm->backlight_dev[i])
+			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+	}
+
 	return 0;
 }
-- 
2.51.0

From 50e0bae34fa6b8b18e13473ddf0bcdab6ab68310 Mon Sep 17 00:00:00 2001
From: Mario Limonciello
Date: Thu, 13 Feb 2025 16:26:22 -0600
Subject: [PATCH 08/16] drm/amd/display: Add and use new dm_prepare_suspend()
 callback

[Why]
The displays currently don't get turned off until after other IP blocks
have been suspended. However, turning off the displays first gives a very
visible response that the system is on its way down.

[How]
Turn off displays in a prepare_suspend() callback instead when possible.
This will help for suspend and hibernate sequences. The shutdown sequence
however will not call prepare(), so check whether the state has already
been saved to decide what to do.
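For clarity, the resulting split can be sketched as follows (a condensed
sketch only; the real dm_prepare_suspend() in the diff below also WARNs
if state was already cached):

  /* prepare() runs early in suspend/hibernate and turns the displays off
   * by saving the atomic state; shutdown never calls prepare(), so
   * dm_suspend() saves the state itself if nothing was cached yet.
   */
  static int dm_prepare_suspend_sketch(struct amdgpu_ip_block *ip_block)
  {
  	struct amdgpu_device *adev = ip_block->adev;

  	if (amdgpu_in_reset(adev))
  		return 0;	/* reset path restores state elsewhere */

  	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
  	return PTR_ERR_OR_ZERO(adev->dm.cached_state);
  }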
Acked-by: Wayne Lin
Signed-off-by: Mario Limonciello
Signed-off-by: Tom Chung
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 25 ++++++++++++++++---
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bf69cd50712e..4f1cf8afe25f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3141,6 +3141,21 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
 	}
 }
 
+static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
+{
+	struct amdgpu_device *adev = ip_block->adev;
+
+	if (amdgpu_in_reset(adev))
+		return 0;
+
+	WARN_ON(adev->dm.cached_state);
+	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+	if (IS_ERR(adev->dm.cached_state))
+		return PTR_ERR(adev->dm.cached_state);
+
+	return 0;
+}
+
 static int dm_suspend(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
@@ -3171,10 +3186,11 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
 		return 0;
 	}
 
-	WARN_ON(adev->dm.cached_state);
-	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
-	if (IS_ERR(adev->dm.cached_state))
-		return PTR_ERR(adev->dm.cached_state);
+	if (!adev->dm.cached_state) {
+		adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+		if (IS_ERR(adev->dm.cached_state))
+			return PTR_ERR(adev->dm.cached_state);
+	}
 
 	s3_handle_hdmi_cec(adev_to_drm(adev), true);
 
@@ -3606,6 +3622,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
 	.early_fini = amdgpu_dm_early_fini,
 	.hw_init = dm_hw_init,
 	.hw_fini = dm_hw_fini,
+	.prepare_suspend = dm_prepare_suspend,
 	.suspend = dm_suspend,
 	.resume = dm_resume,
 	.is_idle = dm_is_idle,
-- 
2.51.0

From bd00b29b5f236dce677089319176dee5872b5a7a Mon Sep 17 00:00:00 2001
From: Danny Wang
Date: Thu, 13 Feb 2025 16:18:34 +0800
Subject: [PATCH 09/16] drm/amd/display: Do not enable replay when vtotal
 update is pending.

[Why & How]
Vtotal is not applied to HW when handling the vsync interrupt.
Make sure vtotal is aligned before enabling replay.
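Conceptually, the change adds a pending flag that replay enablement must
observe; a minimal sketch of the guard (the helper name is illustrative,
the flag is the timing_adjust_pending field introduced in the diff below):

  /* Replay must not be enabled while a vtotal update has been requested
   * but not yet programmed to HW; set_drr_and_clear_adjust_pending()
   * clears the flag once the timing generator has taken the new values.
   */
  static bool replay_enable_allowed(const struct dc_stream_state *stream)
  {
  	return !stream->adjust.timing_adjust_pending;
  }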
Reviewed-by: Anthony Koo Reviewed-by: Robin Chen Signed-off-by: Danny Wang Signed-off-by: Zhongwei Zhang Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 9 +++++++-- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 15 +++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 + .../drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 7 ++----- .../drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 7 ++----- .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 8 ++------ .../drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4 +--- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 3 +-- .../drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 10 +++------- .../gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 6 ++++++ 10 files changed, 40 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e71ea21401f5..5a43e4901cc0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -453,6 +453,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, if (dc->caps.max_v_total != 0 && (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) { + stream->adjust.timing_adjust_pending = false; if (adjust->allow_otg_v_count_halt) return set_long_vtotal(dc, stream, adjust); else @@ -466,7 +467,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, dc->hwss.set_drr(&pipe, 1, *adjust); - + stream->adjust.timing_adjust_pending = false; return true; } } @@ -3165,8 +3166,12 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_active_fixed) stream->vrr_active_fixed = *update->vrr_active_fixed; - if (update->crtc_timing_adjust) + if (update->crtc_timing_adjust) { + if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || + stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max) + stream->adjust.timing_adjust_pending = true; stream->adjust = *update->crtc_timing_adjust; + } if (update->dpms_off) stream->dpms_off = *update->dpms_off; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index e0277728268a..52ee2225e132 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -659,6 +659,21 @@ void set_p_state_switch_method( } } +void set_drr_and_clear_adjust_pending( + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream, + struct drr_params *params) +{ + /* params can be null.*/ + if (pipe_ctx && pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, params); + + if (stream) + stream->adjust.timing_adjust_pending = false; +} + void get_fams2_visual_confirm_color( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 9f3dd8824ed5..d562ddeca512 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -1017,6 +1017,7 @@ struct dc_crtc_timing_adjust { uint32_t v_total_mid; uint32_t v_total_mid_frame_num; uint32_t allow_otg_v_count_halt; + uint8_t timing_adjust_pending; }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 9c9947fc5d44..bfd734e15731 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1658,9 +1658,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms); // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) @@ -2109,8 +2107,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx, struct timing_generator *tg = pipe_ctx[i]->stream_res.tg; if ((tg != NULL) && tg->funcs) { - if (tg->funcs->set_drr) - tg->funcs->set_drr(tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) if (tg->funcs->set_static_screen_control) tg->funcs->set_static_screen_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 301ef36d3d05..912f96323ed6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -1113,9 +1113,7 @@ static void dcn10_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, NULL); + set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; } @@ -3218,8 +3216,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx, struct timing_generator *tg = pipe_ctx[i]->stream_res.tg; if ((tg != NULL) && tg->funcs) { - if (tg->funcs->set_drr) - tg->funcs->set_drr(tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) if (tg->funcs->set_static_screen_control) tg->funcs->set_static_screen_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index a5a3e0823e21..926c08e790c1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -952,9 +952,7 @@ enum dc_status dcn20_enable_stream_timing( params.vertical_total_max = stream->adjust.v_total_max; params.vertical_total_mid = stream->adjust.v_total_mid; params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms); // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) @@ -2856,9 +2854,7 @@ void dcn20_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, NULL); + set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); /* TODO - convert 
symclk_ref_cnts for otg to a bit map to solve * the case where the same symclk is shared across multiple otg * instances diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 288e9dd9205d..f38340aa3f15 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -543,9 +543,7 @@ static void dcn31_reset_back_end_for_pipe( if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, NULL); + set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); /* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index b907ad1acedd..922b8d71cf1a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1473,8 +1473,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, num_frames = 2 * (frame_rate % 60); } } - if (tg->funcs->set_drr) - tg->funcs->set_drr(tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) if (tg->funcs->set_static_screen_control) tg->funcs->set_static_screen_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 39668d8cc13a..8f5da0ded850 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -830,10 +830,7 @@ enum dc_status dcn401_enable_stream_timing( } hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); - - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms); /* Event triggers and num frames initialized for DRR, but can be * later updated for PSR use. 
Note DRR trigger events are generated
@@ -1820,9 +1817,8 @@ void dcn401_reset_back_end_for_pipe(
 		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
-	if (pipe_ctx->stream_res.tg->funcs->set_drr)
-		pipe_ctx->stream_res.tg->funcs->set_drr(
-				pipe_ctx->stream_res.tg, NULL);
+	set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+
 	/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
 	 * the case where the same symclk is shared across multiple otg
 	 * instances
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 2b1a2a00648a..c8b5ed834579 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -46,6 +46,7 @@ struct dce_hwseq;
 struct link_resource;
 struct dc_dmub_cmd;
 struct pg_block_update;
+struct drr_params;
 
 struct subvp_pipe_control_lock_fast_params {
 	struct dc *dc;
@@ -527,6 +528,11 @@ void set_p_state_switch_method(
 		struct dc_state *context,
 		struct pipe_ctx *pipe_ctx);
 
+void set_drr_and_clear_adjust_pending(
+		struct pipe_ctx *pipe_ctx,
+		struct dc_stream_state *stream,
+		struct drr_params *params);
+
 void hwss_execute_sequence(struct dc *dc,
 		struct block_sequence block_sequence[],
 		int num_steps);
-- 
2.51.0

From 8a21da2842bb22b2b80e5902d0438030d729bfd3 Mon Sep 17 00:00:00 2001
From: Peichen Huang
Date: Tue, 25 Feb 2025 14:52:30 +0800
Subject: [PATCH 10/16] drm/amd/display: not abort link train when bw is low

[WHY]
DP tunneling should not abort link training even if the bandwidth becomes
too low after a downgrade. Otherwise, it would fail the compliance test.

[HOW]
Do link training with the downgraded settings even if the bandwidth is
not enough.

Reviewed-by: Cruise Hung
Reviewed-by: Meenakshikumar Somasundaram
Signed-off-by: Peichen Huang
Signed-off-by: Tom Chung
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 613298d21d03..ef358afdfb65 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1789,13 +1789,10 @@ bool perform_link_training_with_retries(
 			is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
 				(cur_link_settings.lane_count <= LANE_COUNT_ONE));
 
-			if (is_link_bw_low) {
+			if (is_link_bw_low)
 				DC_LOG_WARNING(
 					"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
 					__func__, link->link_index, req_bw, link_bw);
-
-				return false;
-			}
 		}
 
 		msleep(delay_between_attempts);
-- 
2.51.0

From 084e0735448ae4d01928b537c03c7042aef2ab81 Mon Sep 17 00:00:00 2001
From: George Shen
Date: Thu, 13 Feb 2025 14:46:49 -0500
Subject: [PATCH 11/16] drm/amd/display: Implement PCON regulated autonomous
 mode handling

[Why/How]
The DP spec has been updated recently to make regulated autonomous mode
better defined. In case any PCON vendors choose to implement regulated
autonomous mode in the future, pre-emptively add handling for the
regulated autonomous mode based on the current spec.
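The gist of the handling can be sketched as follows (the helper name is
illustrative; the unions are the ones added to dc_dp_types.h in the diff
below):

  /* In regulated autonomous mode the PCON trains its own HDMI FRL link,
   * so DP_PCON_HDMI_POST_FRL_STATUS is only meaningful once the PCON
   * reports both TX-ready and FRL-link-training-finished.
   */
  static bool pcon_post_frl_status_valid(union hdmi_tx_link_status tx_status,
  					union hdmi_encoded_link_bw encoded_bw)
  {
  	return tx_status.bits.HDMI_TX_READY_STATUS &&
  		encoded_bw.bits.FRL_LINK_TRAINING_FINISHED;
  }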
Reviewed-by: Wenjing Liu Signed-off-by: George Shen Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 27 ++++++++- .../dc/link/protocols/link_dp_capability.c | 55 +++++++++++++++---- 2 files changed, 71 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 1f4f11adc491..77c87ad57220 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -432,7 +432,28 @@ union hdmi_encoded_link_bw { uint8_t BW_32Gbps:1; uint8_t BW_40Gbps:1; uint8_t BW_48Gbps:1; - uint8_t RESERVED:1; // Bit 7 + uint8_t FRL_LINK_TRAINING_FINISHED:1; // Bit 7 + } bits; + uint8_t raw; +}; + +union hdmi_tx_link_status { + struct { + uint8_t HDMI_TX_LINK_ACTIVE_STATUS:1; + uint8_t HDMI_TX_READY_STATUS:1; + uint8_t RESERVED:6; + } bits; + uint8_t raw; +}; + +union autonomous_mode_and_frl_link_status { + struct { + uint8_t FRL_LT_IN_PROGRESS_STATUS:1; + uint8_t FRL_LT_LINK_CONFIG_IN_PROGRESS:3; + uint8_t RESERVED:1; + uint8_t FALLBACK_POLICY:1; + uint8_t FALLBACK_POLICY_VALID:1; + uint8_t REGULATED_AUTONOMOUS_MODE_SUPPORTED:1; } bits; uint8_t raw; }; @@ -1166,6 +1187,7 @@ struct dc_dongle_caps { uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; uint32_t dp_hdmi_frl_max_link_bw_in_kbps; + uint32_t dp_hdmi_regulated_autonomous_mode_support; struct dc_dongle_dfp_cap_ext dfp_cap_ext; }; @@ -1394,6 +1416,9 @@ struct dp_trace { #ifndef DP_LTTPR_ALPM_CAPABILITIES #define DP_LTTPR_ALPM_CAPABILITIES 0xF0009 #endif +#ifndef DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS +#define DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS 0x303C +#endif #ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #endif diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index a77410122636..21ee0d96c9d4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -265,6 +265,8 @@ static uint32_t intersect_frl_link_bw_support( supported_bw_in_kbps = 18000000; else if (hdmi_encoded_link_bw.bits.BW_9Gbps) supported_bw_in_kbps = 9000000; + else if (hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED) + supported_bw_in_kbps = 0; /* This case should only get hit in regulated autonomous mode. */ return supported_bw_in_kbps; } @@ -1075,6 +1077,48 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link) return DC_OK; } +static void read_and_intersect_post_frl_lt_status( + struct dc_link *link) +{ + union autonomous_mode_and_frl_link_status autonomous_mode_caps = {0}; + union hdmi_tx_link_status hdmi_tx_link_status = {0}; + union hdmi_encoded_link_bw hdmi_encoded_link_bw = {0}; + + /* Check if dongle supports regulated autonomous mode. 
*/
+	core_link_read_dpcd(link, DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS,
+			&autonomous_mode_caps.raw, sizeof(autonomous_mode_caps));
+
+	link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support =
+			autonomous_mode_caps.bits.REGULATED_AUTONOMOUS_MODE_SUPPORTED;
+
+	if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support) {
+		DC_LOG_DC("%s: PCON supports regulated autonomous mode.\n", __func__);
+
+		core_link_read_dpcd(link, DP_PCON_HDMI_TX_LINK_STATUS,
+				&hdmi_tx_link_status.raw, sizeof(hdmi_tx_link_status));
+	}
+
+	// Intersect reported max link bw support with the supported link rate post FRL link training
+	if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+			&hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+
+		if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support &&
+				(!hdmi_tx_link_status.bits.HDMI_TX_READY_STATUS ||
+				!hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)) {
+			DC_LOG_WARNING("%s: PCON TX link training has not finished.\n", __func__);
+
+			/* Link training not finished, ignore values from this DPCD reg. */
+			return;
+		}
+
+		link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+				link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+				hdmi_encoded_link_bw);
+		DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
+				link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
+	}
+}
+
 static void get_active_converter_info(
 	uint8_t data, struct dc_link *link)
 {
@@ -1163,21 +1207,12 @@ static void get_active_converter_info(
 					hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
 			if (link->dc->caps.dp_hdmi21_pcon_support) {
-				union hdmi_encoded_link_bw hdmi_encoded_link_bw;
 				link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
 						link_bw_kbps_from_raw_frl_link_rate_data(
 								hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
 
-				// Intersect reported max link bw support with the supported link rate post FRL link training
-				if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
-						&hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
-					link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
-							link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
-							hdmi_encoded_link_bw);
-					DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
-							link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
-				}
+				read_and_intersect_post_frl_lt_status(link);
 
 				if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
 					link->dpcd_caps.dongle_caps.extendedCapValid = true;
-- 
2.51.0

From 50bcdef7b616cc6471172fd8cee32a57d6fb39a6 Mon Sep 17 00:00:00 2001
From: Charlene Liu
Date: Wed, 26 Feb 2025 23:52:57 -0500
Subject: [PATCH 12/16] drm/amd/display: assume VBIOS supports DSC as default

[Why & How]
The clear_dsc_setting at boot logic was based on a DCN version check, so
new ASICs lost this DSC cleanup logic. Change the assumption so that the
VBIOS is assumed to support eDP DSC on new ASICs.
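A minimal sketch of the new gate (the helper name is illustrative; it
mirrors the clean_up_dsc_blocks() hunk in the diff below):

  /* Rather than enumerating DCN 3.5/3.51/3.6 explicitly, assume every APU
   * from DCN 3.15 onward has a VBIOS that may light up eDP with DSC and
   * therefore needs the boot-time DSC cleanup.
   */
  static bool vbios_may_enable_dsc(const struct dc *dc)
  {
  	return dc->caps.is_apu && dc->ctx->dce_version >= DCN_VERSION_3_15;
  }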
Reviewed-by: Alvin Lee
Signed-off-by: Charlene Liu
Signed-off-by: Tom Chung
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index bfd734e15731..dbc6e533dcac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1836,11 +1836,10 @@ static void clean_up_dsc_blocks(struct dc *dc)
 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
 	int i;
 
-	if (dc->ctx->dce_version != DCN_VERSION_3_5 &&
-		dc->ctx->dce_version != DCN_VERSION_3_6 &&
-		dc->ctx->dce_version != DCN_VERSION_3_51)
+	if (!dc->caps.is_apu ||
+		dc->ctx->dce_version < DCN_VERSION_3_15)
 		return;
-
+	/*VBIOS supports dsc starts from dcn315*/
 	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
 		struct dcn_dsc_state s = {0};
-- 
2.51.0

From 15d1c2e6bf60511ba068d7d735d051911c6c5b92 Mon Sep 17 00:00:00 2001
From: Dillon Varone
Date: Fri, 20 Dec 2024 17:01:29 -0500
Subject: [PATCH 13/16] drm/amd/display: Add Support for reg inbox0 for
 host->DMUB CMDs

[WHY]
DCN4+ supports a new register-based mailbox for sending messages from the
host to the DMCUB. This mailbox supports 64-byte commands, which makes it
compatible with the same structure as the frame buffer based mailbox.

[HOW]
The intention for reg_inbox0 is to be a slot-in replacement for the frame
buffer based mailbox (Inbox1). It supports all of the required features:
- Supports all messages handled by FB Inbox1
- Supports multi-command batching

Reviewed-by: Joshua Aberback
Signed-off-by: Dillon Varone
Signed-off-by: Tom Chung
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  | 185 ++++++-----
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h  |   2 +-
 drivers/gpu/drm/amd/display/dc/dc_helper.c    |   2 +-
 .../gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c |   3 +-
 .../gpu/drm/amd/display/dc/dce/dmub_replay.c  |  19 +-
 .../display/dc/link/protocols/link_dp_dpia.c  |   1 +
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h   | 133 +++++---
 .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   |  49 ++-
 .../drm/amd/display/dmub/src/dmub_dcn401.c    | 121 ++++---
 .../drm/amd/display/dmub/src/dmub_dcn401.h    |   4 +-
 .../gpu/drm/amd/display/dmub/src/dmub_srv.c   | 302 ++++++++++++------
 11 files changed, 547 insertions(+), 274 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 614e03bfd598..ca6da53f45ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -70,20 +70,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
 	}
 }
 
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
 {
-	struct dmub_srv *dmub = dc_dmub_srv->dmub;
-	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+	struct dmub_srv *dmub;
+	struct dc_context *dc_ctx;
 	enum dmub_status status;
 
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return false;
+
+	dc_ctx = dc_dmub_srv->ctx;
+	dmub = dc_dmub_srv->dmub;
+
 	do {
-		status = dmub_srv_wait_for_idle(dmub, 100000);
+		status = dmub_srv_wait_for_pending(dmub, 100000);
 	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
 
 	if (status != DMUB_STATUS_OK) {
 		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } + + return status == DMUB_STATUS_OK; } void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv) @@ -126,7 +134,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv, } } -bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, +static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list) +{ + struct dc_context *dc_ctx; + struct dmub_srv *dmub; + enum dmub_status status = DMUB_STATUS_OK; + int i; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dc_ctx = dc_dmub_srv->ctx; + dmub = dc_dmub_srv->dmub; + + for (i = 0 ; i < count; i++) { + /* confirm no messages pending */ + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); + + /* queue command */ + if (status == DMUB_STATUS_OK) + status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]); + + /* check for errors */ + if (status != DMUB_STATUS_OK) { + break; + } + } + + if (status != DMUB_STATUS_OK) { + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } + return false; + } + + return true; +} + +static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list) { @@ -143,11 +193,16 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, for (i = 0 ; i < count; i++) { // Queue command - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + if (!cmd_list[i].cmd_common.header.multi_cmd_pending || + dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) { + status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]); + } else { + status = DMUB_STATUS_QUEUE_FULL; + } if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ - status = dmub_srv_cmd_execute(dmub); + status = dmub_srv_fb_cmd_execute(dmub); if (status == DMUB_STATUS_POWER_STATE_D3) return false; @@ -156,7 +211,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); /* Requeue the command. 
*/ - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]); } if (status != DMUB_STATUS_OK) { @@ -168,7 +223,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, } } - status = dmub_srv_cmd_execute(dmub); + status = dmub_srv_fb_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) { DC_ERROR("Error starting DMUB execution: status=%d\n", status); @@ -180,6 +235,23 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, return true; } +bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list) +{ + bool res = false; + + if (dc_dmub_srv && dc_dmub_srv->dmub) { + if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) { + res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list); + } else { + res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list); + } + } + + return res; +} + bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, enum dm_dmub_wait_type wait_type, union dmub_rb_cmd *cmd_list) @@ -202,7 +274,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); if (!dmub->debug.timeout_info.timeout_occured) { dmub->debug.timeout_info.timeout_occured = true; - dmub->debug.timeout_info.timeout_cmd = *cmd_list; + if (cmd_list) + dmub->debug.timeout_info.timeout_cmd = *cmd_list; dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); } dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); @@ -210,8 +283,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, } // Copy data back from ring buffer into command - if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) - dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) { + dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list); + } } return true; @@ -224,74 +298,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) { - struct dc_context *dc_ctx; - struct dmub_srv *dmub; - enum dmub_status status; - int i; - - if (!dc_dmub_srv || !dc_dmub_srv->dmub) - return false; - - dc_ctx = dc_dmub_srv->ctx; - dmub = dc_dmub_srv->dmub; - - for (i = 0 ; i < count; i++) { - // Queue command - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); - - if (status == DMUB_STATUS_QUEUE_FULL) { - /* Execute and wait for queue to become empty again. */ - status = dmub_srv_cmd_execute(dmub); - if (status == DMUB_STATUS_POWER_STATE_D3) - return false; - - status = dmub_srv_wait_for_idle(dmub, 100000); - if (status != DMUB_STATUS_OK) - return false; - - /* Requeue the command. 
*/ - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); - } - - if (status != DMUB_STATUS_OK) { - if (status != DMUB_STATUS_POWER_STATE_D3) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - } - return false; - } - } - - status = dmub_srv_cmd_execute(dmub); - if (status != DMUB_STATUS_OK) { - if (status != DMUB_STATUS_POWER_STATE_D3) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - } + if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list)) return false; - } - - // Wait for DMUB to process command - if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { - if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { - do { - status = dmub_srv_wait_for_idle(dmub, 100000); - } while (status != DMUB_STATUS_OK); - } else - status = dmub_srv_wait_for_idle(dmub, 100000); - if (status != DMUB_STATUS_OK) { - DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - return false; - } - - // Copy data back from ring buffer into command - if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) - dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); - } - - return true; + return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list); } bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) @@ -1243,7 +1253,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL); memset(&new_signals, 0, sizeof(new_signals)); @@ -1400,7 +1410,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); - dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); + dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub); } } @@ -1654,7 +1664,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, /* fill in generic command header */ global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; - global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); + global_cmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); if (enable) { /* send global configuration parameters */ @@ -1673,11 +1684,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, /* configure command header */ stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; - stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); + stream_base_cmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); stream_base_cmd->header.multi_cmd_pending = 1; stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; - stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); + stream_sub_state_cmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); stream_sub_state_cmd->header.multi_cmd_pending = 1; /* copy stream static base state */ memcpy(&stream_base_cmd->config, @@ -1723,7 +1736,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc, 
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num; cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger; - cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); + cmd.fams2_drr_update.header.payload_bytes = + sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -1759,7 +1773,8 @@ void dc_dmub_srv_fams2_passthrough_flip( /* build command header */ cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP; - cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip); + cmds[num_cmds].fams2_flip.header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header); /* for chaining multiple commands, all but last command should set to 1 */ cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index a636f4c3f01d..ada5c2fb2db3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -58,7 +58,7 @@ struct dc_dmub_srv { bool needs_idle_wake; }; -void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 8f077e15b4f0..72b87b78da0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -682,7 +682,7 @@ void reg_sequence_wait_done(const struct dc_context *ctx) if (offload && ctx->dc->debug.dmub_offload_enabled && !ctx->dc->debug.dmcub_emulation) { - dc_dmub_srv_wait_idle(ctx->dmub_srv); + dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 0d7e7f3b81a1..a641ae04450c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -240,7 +240,8 @@ bool dmub_abm_save_restore( cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; - cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); + cmd.abm_save_restore.header.payload_bytes = + sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header); dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index c31e4f26a305..fcd3d86ad517 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -280,7 +280,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm memset(&cmd, 0, sizeof(cmd)); pCmd->header.type = DMUB_CMD__REPLAY; pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL; - pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal); + pCmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) - + 
sizeof(struct dmub_cmd_header); pCmd->replay_set_power_opt_data.power_opt = power_opt; pCmd->replay_set_power_opt_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); @@ -319,7 +321,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_timing_sync.header.sub_type = DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED; cmd.replay_set_timing_sync.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_timing_sync); + sizeof(struct dmub_rb_cmd_replay_set_timing_sync) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst = cmd_element->sync_data.panel_inst; @@ -331,7 +334,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_frameupdate_timer.header.sub_type = DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER; cmd.replay_set_frameupdate_timer.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer); + sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_frameupdate_timer.data.panel_inst = cmd_element->panel_inst; @@ -345,7 +349,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_pseudo_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL; cmd.replay_set_pseudo_vtotal.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal); + sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_pseudo_vtotal.data.panel_inst = cmd_element->pseudo_vtotal_data.panel_inst; @@ -357,7 +362,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_disabled_adaptive_sync_sdp.header.sub_type = DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP; cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp); + sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst = cmd_element->disabled_adaptive_sync_sdp_data.panel_inst; @@ -369,7 +375,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_general_cmd.header.sub_type = DMUB_CMD__REPLAY_SET_GENERAL_CMD; cmd.replay_set_general_cmd.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_general_cmd); + sizeof(struct dmub_rb_cmd_replay_set_general_cmd) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_general_cmd.data.panel_inst = cmd_element->set_general_cmd_data.panel_inst; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 0d123e647652..c149210096ac 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -92,6 +92,7 @@ bool dpia_query_hpd_status(struct dc_link *link) /* prepare QUERY_HPD command */ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; + cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data); cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 8ebeb34021e5..203e3a440845 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -51,8 +51,8 @@ * for the cache windows. 
* * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare - * for command submission. Commands can be queued via dmub_srv_cmd_queue() - * and executed via dmub_srv_cmd_execute(). + * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue() + * and executed via dmub_srv_fb_cmd_execute(). * * If the queue is full the dmub_srv_wait_for_idle() call can be used to * wait until the queue has been cleared. @@ -170,6 +170,13 @@ enum dmub_srv_power_state_type { DMUB_POWER_STATE_D3 = 8 }; +/* enum dmub_inbox_cmd_interface type - defines default interface for host->dmub commands */ +enum dmub_inbox_cmd_interface_type { + DMUB_CMD_INTERFACE_DEFAULT = 0, + DMUB_CMD_INTERFACE_FB = 1, + DMUB_CMD_INTERFACE_REG = 2, +}; + /** * struct dmub_region - dmub hw memory region * @base: base address for region, must be 256 byte aligned @@ -349,6 +356,21 @@ struct dmub_diagnostic_data { uint8_t is_cw6_enabled : 1; }; +struct dmub_srv_inbox { + /* generic status */ + uint64_t num_submitted; + uint64_t num_reported; + union { + /* frame buffer mailbox status */ + struct dmub_rb rb; + /* register mailbox status */ + struct { + bool is_pending; + bool is_multi_pending; + }; + }; +}; + /** * struct dmub_srv_base_funcs - Driver specific base callbacks */ @@ -462,18 +484,21 @@ struct dmub_srv_hw_funcs { void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx); void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index); + void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub); void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub); + void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub); + void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable); + uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub); void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub); void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg); void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp); uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub); - void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable); void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable); }; @@ -493,6 +518,7 @@ struct dmub_srv_create_params { enum dmub_asic asic; uint32_t fw_version; bool is_virtual; + enum dmub_inbox_cmd_interface_type inbox_type; }; /** @@ -519,11 +545,11 @@ struct dmub_srv { struct dmub_srv_dcn32_regs *regs_dcn32; struct dmub_srv_dcn35_regs *regs_dcn35; const struct dmub_srv_dcn401_regs *regs_dcn401; - struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs hw_funcs; - struct dmub_rb inbox1_rb; + struct dmub_srv_inbox inbox1; uint32_t inbox1_last_wptr; + struct dmub_srv_inbox reg_inbox0; /** * outbox1_rb is accessed without locks (dal & dc) * and to be used only in dmub_srv_stat_get_notification() @@ -543,6 +569,7 @@ struct dmub_srv { struct dmub_fw_meta_info meta_info; struct dmub_feature_caps feature_caps; struct dmub_visual_confirm_color visual_confirm_color; + enum dmub_inbox_cmd_interface_type inbox_type; enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; @@ -700,19 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); /** - * dmub_srv_sync_inbox1() - sync sw state with hw 
state - * @dmub: the dmub service - * - * Sync sw state with hw state when resume from S0i3 - * - * Return: - * DMUB_STATUS_OK - success - * DMUB_STATUS_INVALID - unspecified error - */ -enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub); - -/** - * dmub_srv_cmd_queue() - queues a command to the DMUB + * dmub_srv_fb_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service * @cmd: the command to queue * @@ -724,11 +739,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub); * DMUB_STATUS_QUEUE_FULL - no remaining room in queue * DMUB_STATUS_INVALID - unspecified error */ -enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, +enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub, const union dmub_rb_cmd *cmd); /** - * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub + * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub * @dmub: the dmub service * * Begins execution of queued commands on the dmub. @@ -737,7 +752,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, * DMUB_STATUS_OK - success * DMUB_STATUS_INVALID - unspecified error */ -enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub); +enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub); /** * dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed @@ -795,6 +810,23 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, uint32_t timeout_us); +/** + * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Waits until the commands queued prior to this call are complete. + * If interfaces remain busy due to additional work being submitted + * concurrently, this function will not continue to wait. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub, + uint32_t timeout_us); + /** * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle * @dmub: the dmub service @@ -893,9 +925,6 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, union dmub_fw_boot_options *option); -enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, - union dmub_rb_cmd *cmd); - enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, bool skip); @@ -959,26 +988,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub); */ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index); -/** - * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command - * being processed by DMUB. - * @dmub: The dmub service - * @cmd: The dmub command being sent. If with_replay is true, the function will - * update cmd with replied data. - * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the - * cmd doesn't need to be replied. - * @timeout_us: timeout in microseconds. - * - * Return: - * DMUB_STATUS_OK - success - * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout - * interval. 
- */ -enum dmub_status dmub_srv_send_reg_inbox0_cmd( - struct dmub_srv *dmub, - union dmub_rb_cmd *cmd, - bool with_reply, uint32_t timeout_us); - /** * dmub_srv_set_power_state() - Track DC power state in dmub_srv * @dmub: The dmub service @@ -991,4 +1000,40 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd( */ void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state); +/** + * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub + * @dmub: the dmub service + * @cmd: the command packet to be executed + * + * Executes a single command for the dmub. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); + + +/** + * dmub_srv_cmd_get_response() - Copies return data for command into buffer + * @dmub: the dmub service + * @cmd_rsp: response buffer + * + * Copies return data for command into buffer + */ +void dmub_srv_cmd_get_response(struct dmub_srv *dmub, + union dmub_rb_cmd *cmd_rsp); + +/** + * dmub_srv_sync_inboxes() - Sync inbox state + * @dmub: the dmub service + * + * Sync inbox state + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub); + #endif /* _DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index f84bbc033e64..1f5f4e3e49d4 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -1331,6 +1331,16 @@ enum dmub_inbox0_command { */ #define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY) +/** + * Maximum number of items in the DMUB REG INBOX0 internal ringbuffer. + */ +#define DMUB_REG_INBOX0_RB_MAX_ENTRY 16 + +/** + * Ringbuffer size in bytes. + */ +#define DMUB_REG_INBOX0_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_REG_INBOX0_RB_MAX_ENTRY) + /** * REG_SET mask for reg offload. 
*/ @@ -1533,7 +1543,8 @@ struct dmub_cmd_header { unsigned int sub_type : 8; /**< command sub type */ unsigned int ret_status : 1; /**< 1 if returned data, 0 otherwise */ unsigned int multi_cmd_pending : 1; /**< 1 if multiple commands chained together */ - unsigned int reserved0 : 6; /**< reserved bits */ + unsigned int is_reg_based : 1; /**< 1 if register based mailbox cmd, 0 if FB based cmd */ + unsigned int reserved0 : 5; /**< reserved bits */ unsigned int payload_bytes : 6; /* payload excluding header - up to 60 bytes */ unsigned int reserved1 : 2; /**< reserved bits */ }; @@ -5890,6 +5901,42 @@ static inline bool dmub_rb_empty(struct dmub_rb *rb) return (rb->wrpt == rb->rptr); } +/** + * @brief gets number of outstanding requests in the RB + * + * @param rb DMUB Ringbuffer + * @return number of outstanding commands in the RB + */ +static inline uint32_t dmub_rb_num_outstanding(struct dmub_rb *rb) +{ + uint32_t data_count; + + if (rb->wrpt >= rb->rptr) + data_count = rb->wrpt - rb->rptr; + else + data_count = rb->capacity - (rb->rptr - rb->wrpt); + + return data_count / DMUB_RB_CMD_SIZE; +} + +/** + * @brief gets number of free buffers in the RB + * + * @param rb DMUB Ringbuffer + * @return number of free command slots in the RB + */ +static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb) +{ + uint32_t data_count; + + if (rb->wrpt >= rb->rptr) + data_count = rb->wrpt - rb->rptr; + else + data_count = rb->capacity - (rb->rptr - rb->wrpt); + + return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE; +} + /** * @brief Checks if the ringbuffer is full * diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index e67f7c4784eb..731ca9b6a6cf 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -517,28 +517,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { uint32_t *dwords = (uint32_t *)cmd; - + int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes; + uint32_t msg_index; static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch"); - REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]); - REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]); - REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]); - REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]); - REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]); - REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]); - REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]); - REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]); - REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]); - REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]); - REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]); - REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]); - REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]); - REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]); - REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]); + /* write remaining payload data based on payload size */ + for (msg_index = 0; msg_index < 15; msg_index++) { + if (payload_size_bytes <= msg_index * 4) { + break; + } + + switch (msg_index) { + case 0: + REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]); + break; + case 1: + REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]); + break; + case 2: + REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]); + break; + case 3: + REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]); + break; + case 4: + REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]); + break; + case 5: + REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]); + break; + case 6: + REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]); + break; +
case 7: + REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]); + break; + case 8: + REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]); + break; + case 9: + REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]); + break; + case 10: + REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]); + break; + case 11: + REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]); + break; + case 12: + REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]); + break; + case 13: + REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]); + break; + case 14: + REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]); + break; + } + } + /* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY * interrupt. */ - REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]); + REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]); } uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub) @@ -556,30 +597,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub, static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch"); - dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0); - dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1); - dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2); - dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3); - dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4); - dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5); - dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6); - dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7); - dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8); - dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9); - dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10); - dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11); - dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12); - dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13); - dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14); - dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP); + dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP); + dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0); + dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1); + dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2); + dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3); + dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4); + dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5); + dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6); + dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7); + dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8); + dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9); + dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10); + dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11); + dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12); + dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13); + dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14); } void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub) { REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1); +} + +void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub) +{ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0); } +void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable) +{ + REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0); +} + void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub) { REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1); @@ -604,11 +654,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub) return status; } -void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable) -{ - REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 
1:0); -} - void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable) { REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h index c35be52676f6..88c3a44d67d9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h @@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub); void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub); +void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub); +void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable); + void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub); void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg); void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg); uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub); -void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable); void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable); uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 0a2285540daf..713576a1f6fa 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -157,6 +157,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) { struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; + /* default to specifying no inbox type */ + enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT; + switch (asic) { case DMUB_ASIC_DCN20: case DMUB_ASIC_DCN21: @@ -353,7 +356,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; if (asic == DMUB_ASIC_DCN351) - funcs->init_reg_offsets = dmub_srv_dcn351_regs_init; + funcs->init_reg_offsets = dmub_srv_dcn351_regs_init; if (asic == DMUB_ASIC_DCN36) funcs->init_reg_offsets = dmub_srv_dcn36_regs_init; @@ -395,10 +398,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_current_time = dmub_dcn401_get_current_time; funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data; + funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg; funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status; funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp; funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack; + funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack; + funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int; + default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now + funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack; funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg; funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp; @@ -411,6 +419,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) return false; } + /* set default inbox type if not overridden */ + if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) { + if
(default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) { + /* use default inbox type as specified by DCN rev */ + dmub->inbox_type = default_inbox_type; + } else if (funcs->send_reg_inbox0_cmd_msg) { + /* prefer reg as default inbox type if present */ + dmub->inbox_type = DMUB_CMD_INTERFACE_REG; + } else { + /* use fb as fallback */ + dmub->inbox_type = DMUB_CMD_INTERFACE_FB; + } + } + return true; } @@ -426,6 +448,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub, dmub->asic = params->asic; dmub->fw_version = params->fw_version; dmub->is_virtual = params->is_virtual; + dmub->inbox_type = params->inbox_type; /* Setup asic dependent hardware funcs. */ if (!dmub_srv_hw_setup(dmub, params->asic)) { @@ -695,7 +718,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, inbox1.base = cw4.region.base; inbox1.top = cw4.region.base + DMUB_RB_SIZE; outbox1.base = inbox1.top; - outbox1.top = cw4.region.top; + outbox1.top = inbox1.top + DMUB_RB_SIZE; cw5.offset.quad_part = tracebuff_fb->gpu_addr; cw5.region.base = DMUB_CW5_BASE; @@ -737,7 +760,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, rb_params.ctx = dmub; rb_params.base_address = mail_fb->cpu_addr; rb_params.capacity = DMUB_RB_SIZE; - dmub_rb_init(&dmub->inbox1_rb, &rb_params); + dmub_rb_init(&dmub->inbox1.rb, &rb_params); // Initialize outbox1 ring buffer rb_params.ctx = dmub; @@ -768,27 +791,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) -{ - if (!dmub->sw_init) - return DMUB_STATUS_INVALID; - - if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { - uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub); - - if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) { - return DMUB_STATUS_HW_FAILURE; - } else { - dmub->inbox1_rb.rptr = rptr; - dmub->inbox1_rb.wrpt = wptr; - dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; - } - } - - return DMUB_STATUS_OK; -} - enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) { if (!dmub->sw_init) @@ -799,8 +801,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) /* mailboxes have been reset in hw, so reset the sw state as well */ dmub->inbox1_last_wptr = 0; - dmub->inbox1_rb.wrpt = 0; - dmub->inbox1_rb.rptr = 0; + dmub->inbox1.rb.wrpt = 0; + dmub->inbox1.rb.rptr = 0; + dmub->inbox1.num_reported = 0; + dmub->inbox1.num_submitted = 0; + dmub->reg_inbox0.num_reported = 0; + dmub->reg_inbox0.num_submitted = 0; + dmub->reg_inbox0.is_pending = 0; dmub->outbox0_rb.wrpt = 0; dmub->outbox0_rb.rptr = 0; dmub->outbox1_rb.wrpt = 0; @@ -811,7 +818,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, +enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub, const union dmub_rb_cmd *cmd) { if (!dmub->hw_init) @@ -820,18 +827,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, if (dmub->power_state != DMUB_POWER_STATE_D0) return DMUB_STATUS_POWER_STATE_D3; - if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity || - dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) { + if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity || + dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) { return DMUB_STATUS_HW_FAILURE; } - if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) + if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) { + dmub->inbox1.num_submitted++; return DMUB_STATUS_OK; + } return 
DMUB_STATUS_QUEUE_FULL; } -enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) +enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub) { struct dmub_rb flush_rb; @@ -846,13 +855,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) * been flushed to framebuffer memory. Otherwise DMCUB might * read back stale, fully invalid or partially invalid data. */ - flush_rb = dmub->inbox1_rb; + flush_rb = dmub->inbox1.rb; flush_rb.rptr = dmub->inbox1_last_wptr; dmub_rb_flush_pending(&flush_rb); - dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt); - dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt; return DMUB_STATUS_OK; } @@ -910,26 +919,97 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } +static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub) +{ + if (dmub->reg_inbox0.is_pending) { + dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status && + !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); + + if (!dmub->reg_inbox0.is_pending) { + /* ack the rsp interrupt */ + if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack) + dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub); + + /* only update the reported count if commands aren't being batched */ + if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) { + dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted; + } + } + } +} + +enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + const uint32_t polling_interval_us = 1; + struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0; + struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1; + const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0; + const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1; + + if (!dmub->hw_init || + !dmub->hw_funcs.get_inbox1_wptr) + return DMUB_STATUS_INVALID; + + /* take a snapshot of the required mailbox state */ + scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); + + for (i = 0; i <= timeout_us; i += polling_interval_us) { + scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + + scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending && + dmub->hw_funcs.read_reg_inbox0_rsp_int_status && + !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); + + if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity) + return DMUB_STATUS_HW_FAILURE; + + /* check current HW state first, but use command submission vs reported as a fallback */ + if ((dmub_rb_empty(&scratch_inbox1.rb) || + inbox1->num_reported >= scratch_inbox1.num_submitted) && + (!scratch_reg_inbox0.is_pending || + reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted)) + return DMUB_STATUS_OK; + + udelay(polling_interval_us); + } + + return DMUB_STATUS_TIMEOUT; +} + enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us) { uint32_t i, rptr; + const uint32_t polling_interval_us = 1; if (!dmub->hw_init) return DMUB_STATUS_INVALID; - for (i = 0; i <= timeout_us; ++i) { - rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + for (i = 0; i < timeout_us; i += polling_interval_us) { + /* update inbox1 state */ + rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - if (rptr > dmub->inbox1_rb.capacity) + if (rptr > dmub->inbox1.rb.capacity) return DMUB_STATUS_HW_FAILURE; - dmub->inbox1_rb.rptr = rptr; + if (dmub->inbox1.rb.rptr > rptr) { + /* rb wrapped */ 
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; + } else { + dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; + } + dmub->inbox1.rb.rptr = rptr; + + /* update reg_inbox0 */ + dmub_srv_update_reg_inbox0_status(dmub); - if (dmub_rb_empty(&dmub->inbox1_rb)) + /* check for idle */ + if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending) return DMUB_STATUS_OK; - udelay(1); + udelay(polling_interval_us); } return DMUB_STATUS_TIMEOUT; @@ -1040,35 +1120,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, - union dmub_rb_cmd *cmd) -{ - enum dmub_status status = DMUB_STATUS_OK; - - // Queue command - status = dmub_srv_cmd_queue(dmub, cmd); - - if (status != DMUB_STATUS_OK) - return status; - - // Execute command - status = dmub_srv_cmd_execute(dmub); - - if (status != DMUB_STATUS_OK) - return status; - - // Wait for DMUB to process command - status = dmub_srv_wait_for_idle(dmub, 100000); - - if (status != DMUB_STATUS_OK) - return status; - - // Copy data back from ring buffer into command - dmub_rb_get_return_data(&dmub->inbox1_rb, cmd); - - return status; -} - static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb, void *entry) { @@ -1160,46 +1211,105 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_ } } -enum dmub_status dmub_srv_send_reg_inbox0_cmd( - struct dmub_srv *dmub, - union dmub_rb_cmd *cmd, - bool with_reply, uint32_t timeout_us) +void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state) { - uint32_t rsp_ready = 0; - uint32_t i; + if (!dmub || !dmub->hw_init) + return; + + dmub->power_state = dmub_srv_power_state; +} + +enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) +{ + uint32_t num_pending = 0; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (dmub->power_state != DMUB_POWER_STATE_D0) + return DMUB_STATUS_POWER_STATE_D3; + + if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg || + !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack) + return DMUB_STATUS_INVALID; + + if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported) + num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported; + else + /* num_submitted wrapped */ + num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY - + (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted); + if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY) + return DMUB_STATUS_QUEUE_FULL; + + /* clear last rsp ack and send message */ + dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub); dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd); - for (i = 0; i < timeout_us; i++) { - rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); - if (rsp_ready) - break; - udelay(1); + dmub->reg_inbox0.num_submitted++; + dmub->reg_inbox0.is_pending = true; + dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending; + + return DMUB_STATUS_OK; +} + +void dmub_srv_cmd_get_response(struct dmub_srv *dmub, + union dmub_rb_cmd *cmd_rsp) +{ + if (dmub) { + if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG && + dmub->hw_funcs.read_reg_inbox0_cmd_rsp) { + dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp); + } else { + dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp); + } } - if (rsp_ready == 0) - return DMUB_STATUS_TIMEOUT; +} - if (with_reply) - 
dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd); +static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub) +{ + if (!dmub || !dmub->sw_init) + return DMUB_STATUS_INVALID; - dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub); + dmub->reg_inbox0.is_pending = 0; + dmub->reg_inbox0.is_multi_pending = 0; - /* wait for rsp int status is cleared to initial state before exit */ - for (; i <= timeout_us; i++) { - rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); - if (rsp_ready == 0) - break; - udelay(1); + return DMUB_STATUS_OK; +} + +static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { + uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub); + + if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) { + return DMUB_STATUS_HW_FAILURE; + } else { + dmub->inbox1.rb.rptr = rptr; + dmub->inbox1.rb.wrpt = wptr; + dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt; + } } - ASSERT(rsp_ready == 0); return DMUB_STATUS_OK; } -void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state) +enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub) { - if (!dmub || !dmub->hw_init) - return; + enum dmub_status status; - dmub->power_state = dmub_srv_power_state; + status = dmub_srv_sync_reg_inbox0(dmub); + if (status != DMUB_STATUS_OK) + return status; + + status = dmub_srv_sync_inbox1(dmub); + if (status != DMUB_STATUS_OK) + return status; + + return DMUB_STATUS_OK; } -- 2.51.0 From 274a87eb389f58eddcbc5659ab0b180b37e92775 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Thu, 27 Feb 2025 16:36:25 -0700 Subject: [PATCH 14/16] drm/amd/display: Assign normalized_pix_clk when color depth = 14 [WHY & HOW] A warning message "WARNING: CPU: 4 PID: 459 at ... /dc_resource.c:3397 calculate_phy_pix_clks+0xef/0x100 [amdgpu]" occurs because the display_color_depth == COLOR_DEPTH_141414 is not handled. This is observed in Radeon RX 6600 XT. It is fixed by assigning pix_clk * (14 * 3) / 24 - same as the rest. Also fixes the indentation in get_norm_pix_clk. Reviewed-by: Harry Wentland Signed-off-by: Alex Hung Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ea404435c9b9..313a32248cd7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -3623,10 +3623,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) break; case COLOR_DEPTH_121212: normalized_pix_clk = (pix_clk * 36) / 24; - break; + break; + case COLOR_DEPTH_141414: + normalized_pix_clk = (pix_clk * 42) / 24; + break; case COLOR_DEPTH_161616: normalized_pix_clk = (pix_clk * 48) / 24; - break; + break; default: ASSERT(0); break; -- 2.51.0 From dd60bfd5349a7a4c9e1a8b14c935593342c6c6a2 Mon Sep 17 00:00:00 2001 From: Leo Zeng Date: Thu, 27 Feb 2025 15:09:04 -0500 Subject: [PATCH 15/16] drm/amd/display: Fix visual confirm color not updating [WHY] Sometimes visual confirm color is updated, but the background color is not changed. This causes visual confirm to show incorrect colors. [HOW] Update background color when visual confirm color changes. 
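The mechanism, roughly: hwss_build_fast_sequence() queues steps as {function id, parameters} entries and bumps *num_steps, so the fix appends one MPC_UPDATE_VISUAL_CONFIRM step whenever visual confirm is enabled, reprogramming the MPCC background color together with the confirm color. A stand-alone sketch of that append pattern follows; the types here are simplified stand-ins, not the real DC structs shown in the diff below.

#include <stdio.h>

/* Simplified stand-ins for the DC block-sequence types */
enum step_func { MPC_UPDATE_VISUAL_CONFIRM };

struct block_step {
        enum step_func func;
        int mpcc_id; /* stand-in for the per-function parameter union */
};

/* Mirror the shape of the hunk below: fill the step parameters,
 * set the function id, then advance the step count. */
static void queue_visual_confirm(struct block_step *seq,
                                 unsigned int *num_steps, int mpcc_id)
{
        seq[*num_steps].mpcc_id = mpcc_id;
        seq[*num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
        (*num_steps)++;
}

int main(void)
{
        struct block_step seq[4];
        unsigned int num_steps = 0;

        queue_visual_confirm(seq, &num_steps, 0);
        printf("steps queued: %u\n", num_steps);
        return 0;
}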
Reviewed-by: Dillon Varone Signed-off-by: Leo Zeng Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 52ee2225e132..55b32dfbfdd6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -817,7 +817,14 @@ void hwss_build_fast_sequence(struct dc *dc, block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; (*num_steps)++; } - + if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && + dc->hwss.update_visual_confirm_color) { + block_sequence[*num_steps].params.update_visual_confirm_params.dc = dc; + block_sequence[*num_steps].params.update_visual_confirm_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.update_visual_confirm_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst; + block_sequence[*num_steps].func = MPC_UPDATE_VISUAL_CONFIRM; + (*num_steps)++; + } if (current_mpc_pipe->stream->update_flags.bits.out_csc) { block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc; block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst; -- 2.51.0 From d3069feecdb5542604d29b59acfd1fd213bad95b Mon Sep 17 00:00:00 2001 From: Zhikai Zhai Date: Thu, 27 Feb 2025 20:09:14 +0800 Subject: [PATCH 16/16] drm/amd/display: calculate the remain segments for all pipes [WHY] If non-top pipes are left out of the calculation, the remaining de-tile buffer segments can stay above zero; the override de-tile buffer size is then treated as valid and used, and the de-tile buffer segments ultimately consumed by all pipes can exceed the maximum. [HOW] Include non-top pipes when calculating the remaining de-tile buffer segments. Don't set the override size to the per-pipe average when that value would exceed the maximum. 
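The distribution itself is plain integer arithmetic: the spare segments beyond a reserved minimum are split evenly across the CRB pipes, and the remainder is handed out one extra segment each to the first pipes. A minimal sketch of that share computation (the constant values are illustrative stand-ins, not the real DCN3.15 defines):

#include <stdio.h>

/* Illustrative values only; the real defines live in the DCN3.15 code */
#define MIN_RESERVED_DET_SEGS 2
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64

/* Per-pipe share: an even split of the spare segments, with the first
 * (spare % pipes) pipes receiving one extra segment. */
static int det_share(int remaining, int crb_pipes, int crb_idx)
{
        int spare = remaining - MIN_RESERVED_DET_SEGS;

        if (spare <= 0 || crb_pipes == 0)
                return 0;
        return spare / crb_pipes + (crb_idx < spare % crb_pipes ? 1 : 0);
}

int main(void)
{
        int remaining = 12, pipes = 3, i;

        /* spare = 10 over 3 pipes -> 4, 3, 3 segments */
        for (i = 0; i < pipes; i++)
                printf("pipe %d: +%d segs (%d KB)\n", i,
                       det_share(remaining, pipes, i),
                       det_share(remaining, pipes, i) * DCN3_15_CRB_SEGMENT_SIZE_KB);
        return 0;
}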
Reviewed-by: Charlene Liu Signed-off-by: Zhikai Zhai Signed-off-by: Tom Chung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../dc/resource/dcn315/dcn315_resource.c | 42 +++++++++---------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 14acef036b5a..6c2bb3f63be1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1698,7 +1698,7 @@ static int dcn315_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + if (pixel_rate_crb) { int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); /* Ceil to crb segment size */ int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( @@ -1755,28 +1755,26 @@ static int dcn315_populate_dml_pipes_from_context( continue; } - if (!pipe->top_pipe && !pipe->prev_odm_pipe) { - bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) - || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - - if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) - pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + - (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); - if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { - /* Clamp to 2 pipe split max det segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); - pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; - } - if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { - /* If we are splitting we must have an even number of segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; - pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; - } - /* Convert segments into size for DML use */ - pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; - - crb_idx++; + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 
1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; + } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + + crb_idx++; pipe_cnt++; } } -- 2.51.0
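For reference, the clamping in the hunk above works in two passes: a pipe may hold at most two pipe-splits' worth of segments, and a pipe that will be split must hold an even count; any excess goes back to the remaining pool. A stand-alone sketch of that adjustment (the value used for DCN3_15_MAX_DET_SEGS is illustrative, not the real define):

#include <stdbool.h>
#include <stdio.h>

#define DCN3_15_MAX_DET_SEGS 9 /* illustrative value */

/* Clamp one pipe's override, returning excess segments to the pool
 * and forcing an even count when the pipe will be split. */
static int clamp_det_override(int override, bool split_required, int *remaining)
{
        if (override > 2 * DCN3_15_MAX_DET_SEGS) {
                *remaining += override - 2 * DCN3_15_MAX_DET_SEGS;
                override = 2 * DCN3_15_MAX_DET_SEGS;
        }
        if (override > DCN3_15_MAX_DET_SEGS || split_required) {
                *remaining += override % 2;
                override -= override % 2;
        }
        return override;
}

int main(void)
{
        int remaining = 0;
        int override = clamp_det_override(21, false, &remaining);

        printf("override=%d remaining=%d\n", override, remaining); /* 18 and 3 */
        return 0;
}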