drm/amd/display: Revert "correct sw cache timing to ensure dispclk ramping"
author	Charlene Liu <Charlene.Liu@amd.com>
Fri, 12 Sep 2025 16:37:30 +0000 (12:37 -0400)
committer	Alex Deucher <alexander.deucher@amd.com>
Tue, 23 Sep 2025 14:32:33 +0000 (10:32 -0400)
[why]
Need to consider the SSC enabled case.

This reverts commit f1fd8a9ac2aa5118f76baf28e6ca4d6962a485be.

Reviewed-by: Ovidiu (Ovi) Bunea <ovidiu.bunea@amd.com>
Reviewed-by: Chris Park <chris.park@amd.com>
Signed-off-by: Charlene Liu <Charlene.Liu@amd.com>
Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c

index 6fc8a74916849f89360c576b196f81c48ded7611..86edf11b8c5a84933fc88b7fe176f28a8b98f1de 100644 (file)
@@ -387,7 +387,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
        bool update_dispclk = false;
        bool dpp_clock_lowered = false;
        int all_active_disps = 0;
-       int actual_dppclk = 0;
 
        if (dc->work_arounds.skip_clock_update)
                return;
@@ -473,13 +472,14 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
        if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
                if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
                        dpp_clock_lowered = true;
+               clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
                update_dppclk = true;
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
            (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
                int requested_dispclk_khz = new_clocks->dispclk_khz;
-               int actual_dispclk;
+
                dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
                /* Clamp the requested clock to PMFW based on their limit. */
@@ -487,11 +487,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
                        requested_dispclk_khz = dc->debug.min_disp_clk_khz;
 
                dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
-               actual_dispclk = REG_READ(CLK1_CLK0_CURRENT_CNT);
-
-               /*pmfw might set bypass clock which is higher than hardmin*/
-               if (actual_dispclk >= new_clocks->dispclk_khz)
-                       clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+               clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 
                dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
@@ -509,20 +505,13 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
        if (dpp_clock_lowered) {
                // increase per DPP DTO before lowering global dppclk
                dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
-               dcn35_smu_set_dppclk(clk_mgr, new_clocks->dppclk_khz);
+               dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
        } else {
                // increase global DPPCLK before lowering per DPP DTO
                if (update_dppclk || update_dispclk)
-                       dcn35_smu_set_dppclk(clk_mgr, new_clocks->dppclk_khz);
+                       dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
                dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
        }
-       if (update_dppclk) {
-               actual_dppclk = REG_READ(CLK1_CLK1_CURRENT_CNT);
-
-               /*pmfw might set bypass clock which is higher than hardmin*/
-               if (actual_dppclk >= new_clocks->dppclk_khz)
-                       clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
-       }
 
        // notify PMFW of bandwidth per DPIA tunnel
        if (dc->debug.notify_dpia_hr_bw)
@@ -562,7 +551,7 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
         * since fractional part is only 16 bit in register definition but is 32 bit
         * in our fix point definiton, need to shift left by 16 to obtain correct value
         */
-       pll_req.value |= (long long) fbmult_frac_val << 16;
+       pll_req.value |= fbmult_frac_val << 16;
 
        /* multiply by REFCLK period */
        pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
@@ -789,8 +778,7 @@ static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct
                        table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
 
                        /* Modify previous watermark range to cover up to max */
-                       if (num_valid_sets > 0)
-                               table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+                       table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
                }
                num_valid_sets++;
        }
@@ -951,8 +939,8 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
               is_valid_clock_value(min_dram_speed_mts));
 
        /* dispclk and dppclk can be max at any voltage, same number of levels for both */
-       if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS) {
-               /*numDispclk is the same as numDPPclk*/
+       if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
+           clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
                max_dispclk = find_max_clk_value(clock_table->DispClocks,
                        clock_table->NumDispClkLevelsEnabled);
                max_dppclk = find_max_clk_value(clock_table->DppClocks,
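
For context, a minimal, self-contained C sketch of the readback-and-compare pattern this commit reverts. It is not driver code: the register readback is simulated and all names and values are invented for illustration. The assumption shown is that with spread spectrum clocking (SSC) enabled, the measured clock can sit slightly below the requested hardmin, so an "actual >= requested" guard could skip refreshing the software cache, whereas the restored behavior caches the requested value unconditionally.

/*
 * Illustrative sketch only; names, values, and the simulated readback
 * are not taken from dcn35_clk_mgr.c.
 */
#include <stdio.h>

static int cached_dispclk_khz = 400000;	/* stale software cache */

/*
 * Simulated CLK1_CLK0_CURRENT_CNT readback: with SSC downspread the
 * measured clock lands slightly below the programmed hardmin.
 */
static int read_current_dispclk_khz(int programmed_khz, int ssc_enabled)
{
	return ssc_enabled ? programmed_khz - programmed_khz / 200 /* ~0.5% low */
			   : programmed_khz;
}

int main(void)
{
	int requested_khz = 600000;

	/*
	 * Reverted pattern: update the cache only when the readback meets
	 * or exceeds the request (meant to catch PMFW bypass clocks).
	 */
	int actual = read_current_dispclk_khz(requested_khz, 1 /* SSC on */);

	if (actual >= requested_khz)
		cached_dispclk_khz = requested_khz;

	printf("readback %d kHz, cache still %d kHz (update skipped)\n",
	       actual, cached_dispclk_khz);

	/* Restored behavior: trust the request and cache it unconditionally. */
	cached_dispclk_khz = requested_khz;
	printf("cache now %d kHz\n", cached_dispclk_khz);

	return 0;
}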