drm/amd/display: Update FAMS sequence for DCN30 & DCN32
author    Alvin Lee <alvin.lee2@amd.com>
          Fri, 6 Oct 2023 22:01:24 +0000 (18:01 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Thu, 26 Oct 2023 23:00:23 +0000 (19:00 -0400)
Provide a DCN32-specific prepare_bandwidth sequence and update the DCN30 sequence.
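
For clarity, the resulting control flow on the two families is condensed below as a small, self-contained C sketch derived from the hunks in this patch. The sketch_dc / sketch_ctx_clk types, the *_sketch function names, and the puts() stand-in for dc_dmub_srv_p_state_delegate() are hypothetical illustrations only; the real code operates on struct dc and struct dc_state and also contains the dc_mode softmax memclk clamp, which is unchanged and omitted here.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for struct dc / struct dc_state; illustration only. */
    struct sketch_ctx_clk {
        bool fw_based_mclk_switching;   /* new context wants FPO */
        bool p_state_change_support;
    };

    struct sketch_dc {
        bool fw_fpo_active;             /* dc->clk_mgr->clks.fw_based_mclk_switching */
        bool optimized_required;
    };

    /* DCN30 after this change: force P-State support off only on the
     * inactive -> active FPO edge, and revoke FW P-State delegation only
     * while FW-based switching is not currently active. */
    static void dcn30_sketch(struct sketch_dc *dc, struct sketch_ctx_clk *clk)
    {
        if (clk->fw_based_mclk_switching && !dc->fw_fpo_active) {
            dc->optimized_required = true;
            clk->p_state_change_support = false;
        }
        /* dcn20_prepare_bandwidth(dc, context); */
        if (!dc->fw_fpo_active)
            puts("dc_dmub_srv_p_state_delegate(dc, false, context)");
    }

    /* DCN32 keeps the previous behaviour: disable P-State support around any
     * transition that involves FPO, then restore the saved value so the next
     * optimize sees the correct P-State. */
    static void dcn32_sketch(struct sketch_dc *dc, struct sketch_ctx_clk *clk)
    {
        bool saved = clk->p_state_change_support;

        if (clk->fw_based_mclk_switching || dc->fw_fpo_active) {
            dc->optimized_required = true;
            clk->p_state_change_support = false;
        }
        /* dcn20_prepare_bandwidth(dc, context); */
        if (!clk->fw_based_mclk_switching)
            puts("dc_dmub_srv_p_state_delegate(dc, false, context)");
        if (clk->fw_based_mclk_switching || dc->fw_fpo_active)
            clk->p_state_change_support = saved;
    }

    int main(void)
    {
        struct sketch_dc dc = { .fw_fpo_active = false };
        struct sketch_ctx_clk clk30 = { .fw_based_mclk_switching = true,
                                        .p_state_change_support = true };
        struct sketch_ctx_clk clk32 = clk30;

        dcn30_sketch(&dc, &clk30);  /* entering FPO with the new DCN30 sequence */
        dcn32_sketch(&dc, &clk32);  /* the same transition with the DCN32 sequence */
        return 0;
    }

The first hunk (dcn32_init.c) simply repoints .prepare_bandwidth from dcn30_prepare_bandwidth to the new dcn32_prepare_bandwidth, so the two families now diverge only inside this function.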

Reviewed-by: Samson Tam <samson.tam@amd.com>
Acked-by: Roman Li <roman.li@amd.com>
Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 90f061edb64cec2d90a721c3f0276e5c8eefb249..427cfc8c24a4b7ed4cee1f0b6955cbe371797219 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -60,7 +60,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
        .cursor_lock = dcn10_cursor_lock,
-       .prepare_bandwidth = dcn30_prepare_bandwidth,
+       .prepare_bandwidth = dcn32_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn10_set_drr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 1c839e52bae5e85471e8dce86146c1508504bea9..d71faf2ecd413c3fcc8e06aca2a27f2b8cc4cdb3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -993,11 +993,7 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 void dcn30_prepare_bandwidth(struct dc *dc,
        struct dc_state *context)
 {
-       bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
-       /* Any transition into an FPO config should disable MCLK switching first to avoid
-        * driver and FW P-State synchronization issues.
-        */
-       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !dc->clk_mgr->clks.fw_based_mclk_switching) {
                dc->optimized_required = true;
                context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
        }
@@ -1008,20 +1004,9 @@ void dcn30_prepare_bandwidth(struct dc *dc,
                        dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
        dcn20_prepare_bandwidth(dc, context);
-       /*
-        * enabled -> enabled: do not disable
-        * enabled -> disabled: disable
-        * disabled -> enabled: don't care
-        * disabled -> disabled: don't care
-        */
-       if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
-               dc_dmub_srv_p_state_delegate(dc, false, context);
 
-       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
-               /* After disabling P-State, restore the original value to ensure we get the correct P-State
-                * on the next optimize. */
-               context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
-       }
+       if (!dc->clk_mgr->clks.fw_based_mclk_switching)
+               dc_dmub_srv_p_state_delegate(dc, false, context);
 }
 
 void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 2173d84e4953fcdb4c94312532a5992a7276d3f2..e837554b8a28cc4ff51a8ea145a3854a913a0e39 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -50,6 +50,7 @@
 #include "dce/dmub_hw_lock_mgr.h"
 #include "dcn32/dcn32_resource.h"
 #include "link.h"
+#include "../dcn20/dcn20_hwseq.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -1677,3 +1678,33 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
 
        return is_seamless;
 }
+
+void dcn32_prepare_bandwidth(struct dc *dc,
+       struct dc_state *context)
+{
+       bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+       /* Any transition into an FPO config should disable MCLK switching first to avoid
+        * driver and FW P-State synchronization issues.
+        */
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+               dc->optimized_required = true;
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+       }
+
+       if (dc->clk_mgr->dc_mode_softmax_enabled)
+               if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+                               context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+                       dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+
+       dcn20_prepare_bandwidth(dc, context);
+
+       if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+               dc_dmub_srv_p_state_delegate(dc, false, context);
+
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+               /* After disabling P-State, restore the original value to ensure we get the correct P-State
+                * on the next optimize.
+                */
+               context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
+       }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index 9992e40acd217b9ad224d892f7ae6ff16f81a231..cecf7f0f567190b257cf81e5f756b5a916eba09c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -124,4 +124,7 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
                const struct dc_state *cur_ctx,
                const struct dc_state *new_ctx);
 
+void dcn32_prepare_bandwidth(struct dc *dc,
+       struct dc_state *context);
+
 #endif /* __DC_HWSS_DCN32_H__ */