From 41b830476009f50233e8df538113765c0a59308d Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Mon, 24 Feb 2025 14:24:45 -0500 Subject: [PATCH 01/16] drm/amd/display: Add workaround for a panel Implement w/a for a panel which requires 10s delay after link detect. Signed-off-by: Aurabindo Pillai Reviewed-by: Alex Hung Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27 ++++++++++++++++++- .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 10 +++++-- drivers/gpu/drm/amd/display/dc/dc_types.h | 1 + 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 58848d6b475a..7faf0d0cc23c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3330,6 +3330,24 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, } } +static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, + struct dc_sink *sink) +{ + struct dc_panel_patch *ppatch = NULL; + + if (!sink) + return; + + ppatch = &sink->edid_caps.panel_patch; + if (ppatch->wait_after_dpcd_poweroff_ms) { + msleep(ppatch->wait_after_dpcd_poweroff_ms); + drm_dbg_driver(adev_to_drm(adev), + "%s: adding a %ds delay as w/a for panel\n", + __func__, + ppatch->wait_after_dpcd_poweroff_ms / 1000); + } +} + static int dm_resume(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3451,6 +3469,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + bool ret; if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; @@ -3476,7 +3495,11 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) } else { guard(mutex)(&dm->dc_lock); dc_exit_ips_for_hw_access(dm->dc); - dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); + if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); + } } if (aconnector->fake_enable && aconnector->dc_link->local_sink) @@ -3842,6 +3865,8 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); } if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fbd80d8545a8..253aac93e3d8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -55,11 +55,16 @@ static u32 edid_extract_panel_id(struct edid *edid) (u32)EDID_PRODUCT_ID(edid); } -static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) +static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps) { uint32_t panel_id = edid_extract_panel_id(edid); switch (panel_id) { + /* Workaround for monitors that need a delay after detecting the link */ + case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215): + drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id); + edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000; + break; /* 
Workaround for some monitors which does not work well with FAMS */ case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): @@ -101,6 +106,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( { struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL; struct cea_sad *sads; int sad_count = -1; @@ -130,7 +136,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; - apply_edid_quirks(edid_buf, edid_caps); + apply_edid_quirks(dev, edid_buf, edid_caps); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index e60898c2df01..acd3b373a18e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -181,6 +181,7 @@ struct dc_panel_patch { uint8_t blankstream_before_otg_off; bool oled_optimize_display_on; unsigned int force_mst_blocked_discovery; + unsigned int wait_after_dpcd_poweroff_ms; }; struct dc_edid_caps { -- 2.50.1 From a89b530373b720a94aa97ec3c4ea79dd6ee59fa7 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Mon, 24 Feb 2025 14:27:54 -0500 Subject: [PATCH 02/16] drm/amd/display: use drm_* instead of DRM_ in apply_edid_quirks() drm_* macros are more helpful than DRM_* macros since the former indicates the associated DRM device that prints the error, which may be helpful when debugging. Signed-off-by: Aurabindo Pillai Reviewed-by: Alex Hung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 253aac93e3d8..2cd35392e2da 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -69,7 +69,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): - DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); + drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_fams = true; break; /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ @@ -78,11 +78,11 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB): case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B): case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A): case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1): case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003): - DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); + drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.remove_sink_ext_caps = true; break; case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): - DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id); + drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_colorimetry = true; break; default: -- 2.50.1 From ca17c8e149112270b598791bbd98968fe9231115 Mon Sep 17
00:00:00 2001 From: James Zhu Date: Mon, 3 Mar 2025 13:10:33 -0500 Subject: [PATCH 03/16] drm/amdkfd: remove unnecessary cpu domain validation before move to GTT domain. Signed-off-by: James Zhu Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 62ca12e94581..2ac6d4fa0601 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -595,12 +595,6 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) { struct ttm_operation_ctx ctx = {.interruptible = true}; struct amdgpu_bo *bo = attachment->bo_va->base.bo; - int ret; - - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (ret) - return ret; amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); -- 2.50.1 From a91d91b6004796b868374394962331a1322da7ab Mon Sep 17 00:00:00 2001 From: Tony Yi Date: Wed, 26 Feb 2025 17:03:10 -0500 Subject: [PATCH 04/16] drm/amdgpu: Add support for CPERs on virtualization Add support for CPERs on VFs. VFs do not receive PMFW messages directly; as such, they need to query them from the host. To avoid hitting host event guard, CPER queries need to be rate limited. CPER queries share the same RAS telemetry buffer as error count query, so a mutex protecting the shared buffer was added as well. For readability, the amdgpu_detect_virtualization was refactored into multiple individual functions. Signed-off-by: Tony Yi Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 31 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 138 ++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 18 ++- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 14 +++ 5 files changed, 195 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7b60646c6ad2..a755cc545c91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3099,7 +3099,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_fru_get_product_info(adev); - r = amdgpu_cper_init(adev); + if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev)) + r = amdgpu_cper_init(adev); init_failed: @@ -4333,10 +4334,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, * for throttling interrupt) = 60 seconds. 
*/ ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); - ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1); ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); - ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE); /* Registers mapping */ /* TODO: block userspace mapping of io register */ @@ -4368,7 +4367,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, return -ENOMEM; /* detect hw virtualization here */ - amdgpu_detect_virtualization(adev); + amdgpu_virt_init(adev); amdgpu_device_get_pcie_info(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 81a7d4faac9c..d55c8b7fdb59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -578,12 +578,32 @@ out: return result; } +static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_ring *ring = file_inode(f)->i_private; + + if (*pos & 3 || size & 3) + return -EINVAL; + + if (ring->funcs->type == AMDGPU_RING_TYPE_CPER) + amdgpu_virt_req_ras_cper_dump(ring->adev, false); + + return amdgpu_debugfs_ring_read(f, buf, size, pos); +} + static const struct file_operations amdgpu_debugfs_ring_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_ring_read, .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_virt_ring_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_virt_ring_read, + .llseek = default_llseek +}; + static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -671,9 +691,14 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, char name[32]; sprintf(name, "amdgpu_ring_%s", ring->name); - debugfs_create_file_size(name, S_IFREG | 0444, root, ring, - &amdgpu_debugfs_ring_fops, - ring->ring_size + 12); + if (amdgpu_sriov_vf(adev)) + debugfs_create_file_size(name, S_IFREG | 0444, root, ring, + &amdgpu_debugfs_virt_ring_fops, + ring->ring_size + 12); + else + debugfs_create_file_size(name, S_IFREG | 0444, root, ring, + &amdgpu_debugfs_ring_fops, + ring->ring_size + 12); if (ring->mqd_obj) { sprintf(name, "amdgpu_mqd_%s", ring->name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e6f0152e5b08..ab7e73d0e7b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -739,7 +739,7 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) } } -void amdgpu_detect_virtualization(struct amdgpu_device *adev) +static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev) { uint32_t reg; @@ -775,8 +775,17 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } + return reg; +} + +static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg) +{ + bool is_sriov = false; + /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { + is_sriov = true; + switch (adev->asic_type) { case CHIP_TONGA: case CHIP_FIJI: @@ -805,10 +814,39 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) amdgpu_virt_request_init_data(adev); break; default: /* other chip doesn't support SRIOV */ + is_sriov = false; DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type); break; } } + + return is_sriov; +} + +static void amdgpu_virt_init_ras(struct amdgpu_device *adev) +{ + ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1); + 
ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1); + + ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs, + RATELIMIT_MSG_ON_RELEASE); + ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs, + RATELIMIT_MSG_ON_RELEASE); + + mutex_init(&adev->virt.ras.ras_telemetry_mutex); + + adev->virt.ras.cper_rptr = 0; +} + +void amdgpu_virt_init(struct amdgpu_device *adev) +{ + bool is_sriov = false; + uint32_t reg = amdgpu_virt_init_detect_asic(adev); + + is_sriov = amdgpu_virt_init_req_data(adev, reg); + + if (is_sriov) + amdgpu_virt_init_ras(adev); } static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev) @@ -1288,10 +1326,12 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo * will ignore incoming guest messages. Ratelimit the guest messages to * prevent guest self DOS. */ - if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) { + if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) { + mutex_lock(&virt->ras.ras_telemetry_mutex); if (!virt->ops->req_ras_err_count(adev)) amdgpu_virt_cache_host_error_counts(adev, - adev->virt.fw_reserve.ras_telemetry); + virt->fw_reserve.ras_telemetry); + mutex_unlock(&virt->ras.ras_telemetry_mutex); } return 0; @@ -1322,6 +1362,98 @@ int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_bl return 0; } +static int +amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev, + struct amdsriov_ras_telemetry *host_telemetry, + u32 *more) +{ + struct amd_sriov_ras_cper_dump *cper_dump = NULL; + struct cper_hdr *entry = NULL; + struct amdgpu_ring *ring = &adev->cper.ring_buf; + uint32_t checksum, used_size, i; + int ret = 0; + + checksum = host_telemetry->header.checksum; + used_size = host_telemetry->header.used_size; + + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + return 0; + + cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL); + if (!cper_dump) + return -ENOMEM; + + if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) + goto out; + + *more = cper_dump->more; + + if (cper_dump->wptr < adev->virt.ras.cper_rptr) { + dev_warn( + adev->dev, + "guest specified rptr that was too high! 
guest rptr: 0x%llx, host rptr: 0x%llx\n", + adev->virt.ras.cper_rptr, cper_dump->wptr); + + adev->virt.ras.cper_rptr = cper_dump->wptr; + goto out; + } + + entry = (struct cper_hdr *)&cper_dump->buf[0]; + + for (i = 0; i < cper_dump->count; i++) { + amdgpu_cper_ring_write(ring, entry, entry->record_length); + entry = (struct cper_hdr *)((char *)entry + + entry->record_length); + } + + if (cper_dump->overflow_count) + dev_warn(adev->dev, + "host reported CPER overflow of 0x%llx entries!\n", + cper_dump->overflow_count); + + adev->virt.ras.cper_rptr = cper_dump->wptr; +out: + kfree(cper_dump); + + return ret; +} + +static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + int ret = 0; + uint32_t more = 0; + + if (!amdgpu_sriov_ras_cper_en(adev)) + return -EOPNOTSUPP; + + do { + if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr)) + ret = amdgpu_virt_write_cpers_to_ring( + adev, virt->fw_reserve.ras_telemetry, &more); + else + ret = 0; + } while (more); + + return ret; +} + +int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update) +{ + struct amdgpu_virt *virt = &adev->virt; + int ret = 0; + + if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) && + down_read_trylock(&adev->reset_domain->sem)) { + mutex_lock(&virt->ras.ras_telemetry_mutex); + ret = amdgpu_virt_req_ras_cper_dump_internal(adev); + mutex_unlock(&virt->ras.ras_telemetry_mutex); + up_read(&adev->reset_domain->sem); + } + + return ret; +} + int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev) { unsigned long ue_count, ce_count; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 0f3ccae5c1ab..9f65487e60f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -96,6 +96,7 @@ struct amdgpu_virt_ops { enum amdgpu_ras_block block); bool (*rcvd_ras_intr)(struct amdgpu_device *adev); int (*req_ras_err_count)(struct amdgpu_device *adev); + int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr); }; /* @@ -140,6 +141,7 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8), AMDGIM_FEATURE_RAS_CAPS = (1 << 9), AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10), + AMDGIM_FEATURE_RAS_CPER = (1 << 11), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -242,6 +244,13 @@ struct amdgpu_virt_ras_err_handler_data { int last_reserved; }; +struct amdgpu_virt_ras { + struct ratelimit_state ras_error_cnt_rs; + struct ratelimit_state ras_cper_dump_rs; + struct mutex ras_telemetry_mutex; + uint64_t cper_rptr; +}; + /* GPU virtualization */ struct amdgpu_virt { uint32_t caps; @@ -284,8 +293,7 @@ struct amdgpu_virt { union amd_sriov_ras_caps ras_en_caps; union amd_sriov_ras_caps ras_telemetry_en_caps; - - struct ratelimit_state ras_telemetry_rs; + struct amdgpu_virt_ras ras; struct amd_sriov_ras_telemetry_error_count count_cache; }; @@ -340,6 +348,9 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \ (amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk)) +#define amdgpu_sriov_ras_cper_en(adev) \ +((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) @@ -378,7 +389,7 @@ void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void 
amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); -void amdgpu_detect_virtualization(struct amdgpu_device *adev); +void amdgpu_virt_init(struct amdgpu_device *adev); bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); @@ -406,6 +417,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev); int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, struct ras_err_data *err_data); +int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update); int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev, enum amdgpu_ras_block block); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 4dcb72d1bdda..5aadf24cb202 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -184,6 +184,9 @@ send_request: case IDH_REQ_RAS_ERROR_COUNT: event = IDH_RAS_ERROR_COUNT_READY; break; + case IDH_REQ_RAS_CPER_DUMP: + event = IDH_RAS_CPER_DUMP_READY; + break; default: break; } @@ -467,6 +470,16 @@ static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev) return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT); } +static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr) +{ + uint32_t vf_rptr_hi, vf_rptr_lo; + + vf_rptr_hi = (uint32_t)(vf_rptr >> 32); + vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF); + return xgpu_nv_send_access_requests_with_param( + adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -478,4 +491,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .ras_poison_handler = xgpu_nv_ras_poison_handler, .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr, .req_ras_err_count = xgpu_nv_req_ras_err_count, + .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump, }; -- 2.50.1 From 20c34e5c4af0b4a0972ae8b237c3a3866f45b082 Mon Sep 17 00:00:00 2001 From: Sathishkumar S Date: Wed, 26 Feb 2025 15:48:39 +0530 Subject: [PATCH 05/16] drm/amdgpu: Fix core reset sequence for JPEG4_0_3 For cores 1 through 7 repair the core reset sequence by adjusting offsets to access the expected registers. 
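Concretely (a reviewer's summary sketch derived from the hunk below, not additional code introduced by this patch): the UVD_JMI client stall, clean-status and LMI drop registers appear to be banked per core and therefore need the core's reg_offset, while JPEG_CORE_RST_CTRL carries one reset bit per core in a single per-instance register and is now written without an offset:

	/* per-core JMI registers: address them through reg_offset */
	WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_UVD_JMI_CLIENT_STALL, reg_offset, 0x1F);
	SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
				  reg_offset, 0x1F, 0x1F);
	/* shared per-instance reset control: one bit per core, no per-core offset */
	WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
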
Signed-off-by: Sathishkumar S Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index de46dbf86477..5598a35f72af 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -1104,24 +1104,20 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_UVD_JMI_CLIENT_STALL, reg_offset, 0x1F); - SOC15_WAIT_ON_RREG(JPEG, jpeg_inst, - regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS, - 0x1F, 0x1F); + SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst, + regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS, + reg_offset, 0x1F, 0x1F); WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_JPEG_LMI_DROP, reg_offset, 0x1F); - WREG32_SOC15_OFFSET(JPEG, jpeg_inst, - regJPEG_CORE_RST_CTRL, - reg_offset, 1 << ring->pipe); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe); WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_UVD_JMI_CLIENT_STALL, reg_offset, 0x00); WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_JPEG_LMI_DROP, reg_offset, 0x00); - WREG32_SOC15_OFFSET(JPEG, jpeg_inst, - regJPEG_CORE_RST_CTRL, - reg_offset, 0x00); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) -- 2.50.1 From 057fef20b8401110a7bc1c2fe9d804a8a0bf0d24 Mon Sep 17 00:00:00 2001 From: Victor Lu Date: Thu, 13 Feb 2025 18:38:28 -0500 Subject: [PATCH 06/16] drm/amdgpu: Do not program AGP BAR regs under SRIOV in gfxhub_v1_0.c SRIOV VF does not have write access to AGP BAR regs. Skip the writes to avoid a dmesg warning. Signed-off-by: Victor Lu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 0e3ddea7b8e0..a7bfc9f41d0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Program the AGP BAR */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the AGP BAR */ + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + /* Program the system aperture low logical page number. */ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); -- 2.50.1 From bac38ca8c4755452fcd7e9f2603dea944bcfe76e Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 15 Jan 2025 15:29:34 -0500 Subject: [PATCH 07/16] drm/amdkfd: implement per queue sdma reset for gfx 9.4+ To reset hung SDMA queues on GFX 9.4+ for the GFX9 family, a soft reset must be issued through SMU. Since soft resets will reset an entire SDMA engine, use a common KGD call to do the reset as the KGD will handle avoiding a reset of in flight GFX and paging queues on that engine. 
In addition, create a common call for all reset types to simplify the handling of module parameter settings that block gpu resets. Signed-off-by: Jonathan Kim Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 1 + .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 3 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 14 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 9 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 2 + .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 3 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 9 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c | 7 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 16 ++- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 2 + .../drm/amd/amdkfd/kfd_device_queue_manager.c | 128 ++++++++++++++++-- .../gpu/drm/amd/include/kgd_kfd_interface.h | 2 + 12 files changed, 171 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index 8dfdb18197c4..6e861d08d044 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -193,4 +193,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, .hqd_reset = kgd_gfx_v9_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 9abf29b58ac7..c820418e8ccd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -419,5 +419,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, - .hqd_reset = kgd_gfx_v9_hqd_reset + .hqd_reset = kgd_gfx_v9_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index e2ae714a700f..0c0998477598 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -509,6 +509,17 @@ static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev, return 0; } +static uint32_t kgd_gfx_v9_4_3_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue) +{ + uint32_t reg_offset = get_sdma_rlc_reg_offset(adev, engine, queue); + uint32_t status = RREG32(regSDMA_RLC0_CONTEXT_STATUS + reg_offset); + uint32_t doorbell_off = RREG32(regSDMA_RLC0_DOORBELL_OFFSET + reg_offset); + bool is_active = !!REG_GET_FIELD(status, SDMA_RLC0_CONTEXT_STATUS, SELECTED); + + return is_active ? 
doorbell_off >> 2 : 0; +} + const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping, @@ -543,5 +554,6 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .set_address_watch = kgd_gfx_v9_4_3_set_address_watch, .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch, .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, - .hqd_reset = kgd_gfx_v9_hqd_reset + .hqd_reset = kgd_gfx_v9_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v9_4_3_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 62176d607bef..2887b6f3eaa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -1084,6 +1084,12 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev, return 0; } +uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, @@ -1112,5 +1118,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr, - .hqd_reset = kgd_gfx_v10_hqd_reset + .hqd_reset = kgd_gfx_v10_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 9efd2dd4fdd7..db577c2a847a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -65,3 +65,5 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev, uint32_t queue_id, uint32_t inst, unsigned int utimeout); +uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index c718bedda0ca..ac9ad505f9d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -682,5 +682,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .set_address_watch = kgd_gfx_v10_set_address_watch, .clear_address_watch = kgd_gfx_v10_clear_address_watch, .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr, - .hqd_reset = kgd_gfx_v10_hqd_reset + .hqd_reset = kgd_gfx_v10_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index a4ba49cb22db..e0e6a6a49d90 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -800,6 +800,12 @@ static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev, return 0; } +static uint32_t kgd_gfx_v11_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .program_sh_mem_settings = program_sh_mem_settings_v11, .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11, @@ -824,5 +830,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .set_address_watch = kgd_gfx_v11_set_address_watch, .clear_address_watch = 
kgd_gfx_v11_clear_address_watch, .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr, - .hqd_reset = kgd_gfx_v11_hqd_reset + .hqd_reset = kgd_gfx_v11_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v11_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c index 0dfe7093bd8a..6f0dc23c901b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c @@ -361,6 +361,12 @@ static uint32_t kgd_gfx_v12_clear_address_watch(struct amdgpu_device *adev, return 0; } +static uint32_t kgd_gfx_v12_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v12_kfd2kgd = { .init_interrupts = init_interrupts_v12, .hqd_dump = hqd_dump_v12, @@ -374,4 +380,5 @@ const struct kfd2kgd_calls gfx_v12_kfd2kgd = { .set_wave_launch_mode = kgd_gfx_v12_set_wave_launch_mode, .set_address_watch = kgd_gfx_v12_set_address_watch, .clear_address_watch = kgd_gfx_v12_clear_address_watch, + .hqd_sdma_get_doorbell = kgd_gfx_v12_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 441568163e20..84135eb90660 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -1131,9 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev, uint32_t low, high; uint64_t queue_addr = 0; - if (!amdgpu_gpu_recovery) - return 0; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); amdgpu_gfx_rlc_enter_safe_mode(adev, inst); @@ -1182,9 +1179,6 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev, uint32_t low, high, pipe_reset_data = 0; uint64_t queue_addr = 0; - if (!amdgpu_gpu_recovery) - return 0; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); amdgpu_gfx_rlc_enter_safe_mode(adev, inst); @@ -1229,6 +1223,13 @@ unlock_out: return queue_addr; } +uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue) + +{ + return 0; +} + const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -1258,5 +1259,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, - .hqd_reset = kgd_gfx_v9_hqd_reset + .hqd_reset = kgd_gfx_v9_hqd_reset, + .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index b6a91a552aa4..90c8fa13d519 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -111,3 +111,5 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev, uint32_t queue_id, uint32_t inst, unsigned int utimeout); +uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev, + int engine, int queue); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 91e4988dc1e3..f3f2fd6ee65c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -36,6 +36,7 @@ #include "kfd_kernel_queue.h" #include "amdgpu_amdkfd.h" #include 
"amdgpu_reset.h" +#include "amdgpu_sdma.h" #include "mes_v11_api_def.h" #include "kfd_debug.h" @@ -67,6 +68,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q); static int allocate_sdma_queue(struct device_queue_manager *dqm, struct queue *q, const uint32_t *restore_sdma_id); +static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma); + static inline enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) { @@ -2205,8 +2208,7 @@ static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uin return NULL; } -/* only for compute queue */ -static int reset_queues_on_hws_hang(struct device_queue_manager *dqm) +static int reset_hung_queues(struct device_queue_manager *dqm) { int r = 0, reset_count = 0, i; @@ -2259,6 +2261,104 @@ reset_fail: return r; } +static bool sdma_has_hang(struct device_queue_manager *dqm) +{ + int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); + int engine_end = engine_start + get_num_all_sdma_engines(dqm); + int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine; + int i, j; + + for (i = engine_start; i < engine_end; i++) { + for (j = 0; j < num_queues_per_eng; j++) { + if (!dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j)) + continue; + + return true; + } + } + + return false; +} + +static bool set_sdma_queue_as_reset(struct device_queue_manager *dqm, + uint32_t doorbell_off) +{ + struct device_process_node *cur; + struct qcm_process_device *qpd; + struct queue *q; + + list_for_each_entry(cur, &dqm->queues, list) { + qpd = cur->qpd; + list_for_each_entry(q, &qpd->queues_list, list) { + if ((q->properties.type == KFD_QUEUE_TYPE_SDMA || + q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) && + q->properties.doorbell_off == doorbell_off) { + set_queue_as_reset(dqm, q, qpd); + return true; + } + } + } + + return false; +} + +static int reset_hung_queues_sdma(struct device_queue_manager *dqm) +{ + int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); + int engine_end = engine_start + get_num_all_sdma_engines(dqm); + int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine; + int r = 0, i, j; + + if (dqm->is_hws_hang) + return -EIO; + + /* Scan for hung HW queues and reset engine. */ + dqm->detect_hang_count = 0; + for (i = engine_start; i < engine_end; i++) { + for (j = 0; j < num_queues_per_eng; j++) { + uint32_t doorbell_off = + dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j); + + if (!doorbell_off) + continue; + + /* Reset engine and check. */ + if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) || + dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) || + !set_sdma_queue_as_reset(dqm, doorbell_off)) { + r = -ENOTRECOVERABLE; + goto reset_fail; + } + + /* Should only expect one queue active per engine */ + dqm->detect_hang_count++; + break; + } + } + + /* Signal process reset */ + if (dqm->detect_hang_count) + kfd_signal_reset_event(dqm->dev); + else + r = -ENOTRECOVERABLE; + +reset_fail: + dqm->detect_hang_count = 0; + + return r; +} + +static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma) +{ + while (halt_if_hws_hang) + schedule(); + + if (!amdgpu_gpu_recovery) + return -ENOTRECOVERABLE; + + return is_sdma ? 
reset_hung_queues_sdma(dqm) : reset_hung_queues(dqm); +} + /* dqm->lock mutex has to be locked before calling this function */ static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, @@ -2309,16 +2409,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, * check those fields */ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; - if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) { - while (halt_if_hws_hang) - schedule(); - if (reset_queues_on_hws_hang(dqm)) { - dqm->is_hws_hang = true; - kfd_hws_hang(dqm); - retval = -ETIME; - goto out; - } - } + if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd) && + reset_queues_on_hws_hang(dqm, false)) + goto reset_fail; + + /* Check for SDMA hang and attempt SDMA reset */ + if (sdma_has_hang(dqm) && reset_queues_on_hws_hang(dqm, true)) + goto reset_fail; /* We need to reset the grace period value for this device */ if (grace_period != USE_DEFAULT_GRACE_PERIOD) { @@ -2329,10 +2426,15 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, pm_release_ib(&dqm->packet_mgr); dqm->active_runlist = false; - out: up_read(&dqm->dev->adev->reset_domain->sem); return retval; + +reset_fail: + dqm->is_hws_hang = true; + kfd_hws_hang(dqm); + up_read(&dqm->dev->adev->reset_domain->sem); + return -ETIME; } /* only for compute queue */ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index e3e635a31b8a..1e8dfa6c0dc8 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -330,6 +330,8 @@ struct kfd2kgd_calls { uint64_t (*hqd_reset)(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t inst, unsigned int utimeout); + uint32_t (*hqd_sdma_get_doorbell)(struct amdgpu_device *adev, + int engine, int queue); }; #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ -- 2.50.1 From ceb7114c961bd8d8605dfff8e18d1a39d99cdd30 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 26 Feb 2025 14:22:02 -0500 Subject: [PATCH 08/16] drm/amdkfd: flag per-sdma queue reset supported to user space Similar to compute queue reset, flag SDMA queue reset capabilities to user space for safe testing. 
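As a usage illustration (not part of this change): user space can probe the new bit by scanning a node's properties file in the KFD topology sysfs tree. The path and the "name value" line format below follow the existing KFD topology convention and are assumptions of this sketch; the flag value comes from the kfd_sysfs.h hunk in this patch.

	/* hedged userspace sketch: report per-SDMA-queue reset support for node 0 */
	#include <stdio.h>
	#include <string.h>

	#define HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED 0x00000001

	int main(void)
	{
		/* assumed sysfs location of the KFD topology node properties */
		FILE *f = fopen("/sys/class/kfd/kfd/topology/nodes/0/properties", "r");
		char name[64];
		unsigned long long val;
		int supported = 0;

		if (!f)
			return 1;
		while (fscanf(f, "%63s %llu", name, &val) == 2) {
			if (!strcmp(name, "capability2")) {
				supported = !!(val & HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED);
				break;
			}
		}
		fclose(f);
		printf("per-SDMA-queue reset %ssupported\n", supported ? "" : "not ");
		return 0;
	}
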
Signed-off-by: Jonathan Kim Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1 + include/uapi/linux/kfd_sysfs.h | 3 +++ 3 files changed, 9 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index dbc5595e999a..27e7356eed6f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -519,6 +519,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd->mec_fw_version); sysfs_show_32bit_prop(buffer, offs, "capability", dev->node_props.capability); + sysfs_show_32bit_prop(buffer, offs, "capability2", + dev->node_props.capability2); sysfs_show_64bit_prop(buffer, offs, "debug_prop", dev->node_props.debug_prop); sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", @@ -1981,6 +1983,9 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (kfd_dbg_has_ttmps_always_setup(dev->gpu)) dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; + if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) + dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) || KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4)) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index f06c9db7ddde..3de8ec0043bb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -51,6 +51,7 @@ struct kfd_node_properties { uint32_t cpu_core_id_base; uint32_t simd_id_base; uint32_t capability; + uint32_t capability2; uint64_t debug_prop; uint32_t max_waves_per_simd; uint32_t lds_size_in_kb; diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h index 859b8e91d4d3..1125fe47959f 100644 --- a/include/uapi/linux/kfd_sysfs.h +++ b/include/uapi/linux/kfd_sysfs.h @@ -63,6 +63,9 @@ #define HSA_CAP_PER_QUEUE_RESET_SUPPORTED 0x80000000 #define HSA_CAP_RESERVED 0x000f8000 +#define HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED 0x00000001 +#define HSA_CAP2_RESERVED 0xfffffffe + /* debug_prop bits in node properties */ #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT 0 -- 2.50.1 From a29936bcd21eea7ac87546e2107313cd0f62c4d7 Mon Sep 17 00:00:00 2001 From: Sathishkumar S Date: Wed, 26 Feb 2025 15:59:47 +0530 Subject: [PATCH 09/16] drm/amdgpu: Fix core reset sequence for JPEG5_0_1 For cores 1 through 9 repair the core reset sequence by adjusting offsets to access the expected registers. 
Signed-off-by: Sathishkumar S Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index 56c01d207e20..218e16b68f1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -672,24 +672,20 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_UVD_JMI_CLIENT_STALL, reg_offset, 0x1F); - SOC15_WAIT_ON_RREG(JPEG, jpeg_inst, - regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS, - 0x1F, 0x1F); + SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst, + regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS, + reg_offset, 0x1F, 0x1F); WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_JPEG_LMI_DROP, reg_offset, 0x1F); - WREG32_SOC15_OFFSET(JPEG, jpeg_inst, - regJPEG_CORE_RST_CTRL, - reg_offset, 1 << ring->pipe); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe); WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_UVD_JMI_CLIENT_STALL, reg_offset, 0x00); WREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JMI0_JPEG_LMI_DROP, reg_offset, 0x00); - WREG32_SOC15_OFFSET(JPEG, jpeg_inst, - regJPEG_CORE_RST_CTRL, - reg_offset, 0x00); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) -- 2.50.1 From 3646cc65e2747ff112d7de1a05a2e756414b771e Mon Sep 17 00:00:00 2001 From: Victor Lu Date: Thu, 13 Feb 2025 18:41:26 -0500 Subject: [PATCH 10/16] drm/amdgpu: Do not write to GRBM_CNTL if Aldebaran SRIOV Aldebaran SRIOV VF does not have write permissions to GRBM_CTNL. This access can be skipped to avoid a dmesg warning. v2: Use GC IP version check instead of asic check Signed-off-by: Victor Lu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f7938d318f26..1f0f03108a82 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2637,7 +2637,10 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) u32 tmp; int i; - WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); + if (!amdgpu_sriov_vf(adev) || + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) { + WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); + } gfx_v9_0_tiling_mode_table_init(adev); -- 2.50.1 From 571d36837c84707ea36fa37ab1373a124e328ed4 Mon Sep 17 00:00:00 2001 From: Charles Han Date: Wed, 5 Mar 2025 18:40:57 +0800 Subject: [PATCH 11/16] drm/amdgpu: fix inconsistent indenting warning Fix below inconsistent indenting smatch warning. 
smatch warnings: drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c:582 amdgpu_sdma_reset_engine() warn: inconsistent indenting Signed-off-by: Charles Han Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 39669f8788a7..3a4cef896018 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -621,5 +621,5 @@ exit: if (suspend_user_queues) amdgpu_amdkfd_resume(adev, false); - return ret; + return ret; } -- 2.50.1 From 14c8097ba4db1b6e1c28b2ed65186b9199fe9155 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 27 Feb 2025 12:25:25 -0500 Subject: [PATCH 12/16] drm/amdkfd: remove unused debug gws support status variable Remove unused declaration of gws_debug_workaround. Signed-off-by: Jonathan Kim Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 966d1c484d9f..bb09c873a9a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -289,7 +289,6 @@ struct kfd_node { /* Global GWS resource shared between processes */ void *gws; - bool gws_debug_workaround; /* Clients watching SMI events */ struct list_head smi_clients; -- 2.50.1 From 94b0908b85524d467a00c6aa2a277ef98fd8b152 Mon Sep 17 00:00:00 2001 From: Victor Lu Date: Thu, 13 Feb 2025 18:49:46 -0500 Subject: [PATCH 13/16] drm/amdgpu: Do not set power brake sequence for Aldebaran SRIOV Aldebaran SRIOV VF cannot access the power brake feature regs. The accesses can be skipped to avoid a dmesg warning. v2: Remove redundant asic type check Signed-off-by: Victor Lu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1f0f03108a82..d345285ea885 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4045,7 +4045,8 @@ static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) && + !amdgpu_sriov_vf(adev)) gfx_v9_4_2_set_power_brake_sequence(adev); return r; -- 2.50.1 From fe2fa3be3d59ba67d6de54a0064441ec233cb50c Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 3 Mar 2025 15:10:22 +0800 Subject: [PATCH 14/16] drm/amdgpu: Fix missing drain of retry fault for the last entry When the entry retrieved in svm_range_unmap_from_cpu is the last entry and that entry is a page fault, it also needs to be dropped. So the equal-timestamp case needs to be dropped as well. v2: Only modify svm_range_restore_pages.
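For clarity, a minimal userspace sketch of the boundary case (illustrative only; the timestamp values are made up, and the two macros are copied from the amdgpu_ih.h hunk below): the comparison shifts the 48-bit IH timestamps into the upper bits of a 64-bit value so ordering stays correct across wraparound, and a retry fault whose timestamp equals the checkpoint timestamp is now treated as being at or before the checkpoint and is dropped, where the old check let it through.

	#include <assert.h>
	#include <stdint.h>

	#define amdgpu_ih_ts_after(t1, t2) \
		(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
	#define amdgpu_ih_ts_after_or_equal(t1, t2) \
		(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)

	int main(void)
	{
		uint64_t checkpoint_ts = 0x1000;	/* svms->checkpoint_ts[gpuidx] */
		uint64_t fault_ts = 0x1000;		/* retry fault from the same tick */

		/* old check: an equal timestamp was not drained */
		assert(!amdgpu_ih_ts_after(fault_ts, checkpoint_ts));
		/* new check: the equal case is drained as well */
		assert(amdgpu_ih_ts_after_or_equal(fault_ts, checkpoint_ts));
		return 0;
	}
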
Signed-off-by: Emily Deng Reviewed-by: Xiaogang Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 7d4395a5d8ac..b0a88f92cd82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -78,6 +78,9 @@ struct amdgpu_ih_ring { #define amdgpu_ih_ts_after(t1, t2) \ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL) +#define amdgpu_ih_ts_after_or_equal(t1, t2) \ + (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL) + /* provided by the ih block */ struct amdgpu_ih_funcs { /* ring read/write ptr handling, called from interrupt context */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index db3034b00dac..1a38ac75abbd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3011,7 +3011,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, /* check if this page fault time stamp is before svms->checkpoint_ts */ if (svms->checkpoint_ts[gpuidx] != 0) { - if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) { + if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) { pr_debug("draining retry fault, drop fault 0x%llx\n", addr); r = 0; goto out; -- 2.50.1 From 334dc5fcc3f177823115ec4e075259997c16d4a7 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 6 Mar 2025 11:36:49 +0800 Subject: [PATCH 15/16] drm/amdgpu: increase RAS bad page threshold For the default policy, the driver will issue an RMA event when the number of bad pages is greater than 8 physical rows, rather than when it reaches 8 physical rows; do not rely on the configurable threshold parameter in default mode. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index ab27cecb5519..09a6f8bc1a5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -747,7 +747,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) /* Modify the header if it exceeds.
*/ if (amdgpu_bad_page_threshold != 0 && - control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) { + control->ras_num_bad_pages > ras->bad_page_cnt_threshold) { dev_warn(adev->dev, "Saved bad pages %d reaches threshold value %d\n", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); @@ -806,7 +806,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) */ if (amdgpu_bad_page_threshold != 0 && control->tbl_hdr.version == RAS_TABLE_VER_V2_1 && - control->ras_num_bad_pages < ras->bad_page_cnt_threshold) + control->ras_num_bad_pages <= ras->bad_page_cnt_threshold) control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold - control->ras_num_bad_pages) * 100) / ras->bad_page_cnt_threshold; @@ -1456,7 +1456,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) res); return -EINVAL; } - if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) { + if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) { /* This means that, the threshold was increased since * the last time the system was booted, and now, * ras->bad_page_cnt_threshold - control->num_recs > 0, -- 2.50.1 From 3bc7bc73af7d167e564eb09ed17af0eed24b5110 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Mon, 3 Mar 2025 21:03:03 +0800 Subject: [PATCH 16/16] drm/amdgpu: retire ip init code specific for A0 rev For aqua_vanjaram, A0 HW is retired so remove the code specific for it in gfx ip init. Signed-off-by: Shiwu Zhang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 87add6274b98..b276a16a8121 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -349,18 +349,7 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG, GOLDEN_GB_ADDR_CONFIG); - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) { - WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1); - } else { - /* Golden settings applied by driver for ASIC with rev_id 0 */ - if (adev->rev_id == 0) { - WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1, - REDUCE_FIFO_DEPTH_BY_2, 2); - } else { - WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, - SPARE, 0x1); - } - } + WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1); } } -- 2.50.1