www.infradead.org Git - users/hch/configfs.git/commitdiff
drm/amd/display: Unlock Pipes Based On DET Allocation
authorAustin Zheng <Austin.Zheng@amd.com>
Tue, 30 Jul 2024 15:55:23 +0000 (11:55 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Tue, 13 Aug 2024 14:32:23 +0000 (10:32 -0400)
[Why]
DML21 does not allocate DET evenly between pipes.
May result in underflow when unlocking the pipes as DET could
be overallocated.

[How]
1. Unlock pipes that have a decreased amount of DET allocation
2. Wait for the double buffer to be updated.
3. Unlock the remaining pipes.

Reviewed-by: Alvin Lee <alvin.lee2@amd.com>
Signed-off-by: Austin Zheng <Austin.Zheng@amd.com>
Signed-off-by: Tom Chung <chiahsuan.chung@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
drivers/gpu/drm/amd/display/dc/inc/resource.h

index 1c379a6b1b4c73656c2b4e28bb8fbfbcb0f88f7d..b38340c690c6020796bde19c31ea8940ddf0522a 100644 (file)
@@ -5275,3 +5275,31 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
        dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
        dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
 }
+
+/*
+ * Returns the total number of DET segments allocated across all DPP pipes
+ * driven by the given OTG_MASTER pipe, summed from each pipe's programmed
+ * hubp_regs.det_size.  Returns 0 when the OTG master has no stream.
+ */
+int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master)
+{
+       struct pipe_ctx *opp_heads[MAX_PIPES];
+       struct pipe_ctx *dpp_pipes[MAX_PIPES];
+
+       int dpp_count = 0;
+       int det_segments = 0;
+       /* Declared up front: kernel builds warn on declaration-after-statement */
+       int slice_count;
+
+       if (!otg_master->stream)
+               return 0;
+
+       slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+                       &state->res_ctx, opp_heads);
+
+       for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+               /* Only slices with a plane have DPP pipes (and DET) assigned */
+               if (opp_heads[slice_idx]->plane_state) {
+                       dpp_count = resource_get_dpp_pipes_for_opp_head(
+                                       opp_heads[slice_idx],
+                                       &state->res_ctx,
+                                       dpp_pipes);
+                       for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++)
+                               det_segments += dpp_pipes[dpp_idx]->hubp_regs.det_size;
+               }
+       }
+       return det_segments;
+}
index d36f758971a8cf721eebeb08142dc2af407cab77..37d26fa0b6fbb9fc53077ce8205220340b22847d 100644 (file)
@@ -1170,6 +1170,28 @@ static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned comp
        }
 }
 
+/*
+ * Poll until the DET size double-buffer update for the given HUBP instance
+ * has taken effect, i.e. until DETn_SIZE_CURRENT reads back the detn_size
+ * value cached in the hubbub struct.  Instances outside 0-3 are ignored.
+ * NOTE(review): assuming the trailing REG_WAIT args are (delay_us, max_tries)
+ * giving a ~100 ms timeout — confirm against the REG_WAIT macro definition.
+ */
+static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst)
+{
+       struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+       switch (hubp_inst) {
+       case 0:
+               REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100000); /* 1 vupdate at 10hz */
+               break;
+       case 1:
+               REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100000);
+               break;
+       case 2:
+               REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100000);
+               break;
+       case 3:
+               REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100000);
+               break;
+       default:
+               /* Only 4 DET partitions exist on this ASIC; other instances are a no-op */
+               break;
+       }
+}
+
 static const struct hubbub_funcs hubbub4_01_funcs = {
        .update_dchub = hubbub2_update_dchub,
        .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -1192,6 +1214,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
        .set_request_limit = hubbub32_set_request_limit,
        .program_det_segments = dcn401_program_det_segments,
        .program_compbuf_segments = dcn401_program_compbuf_segments,
+       .wait_for_det_update = dcn401_wait_for_det_update,
 };
 
 void hubbub401_construct(struct dcn20_hubbub *hubbub2,
index 44c1184868e095476d44a8ed6c4e0f63d144fdcc..22c7afbcfc4e29e7f95b42b6044419e364663658 100644 (file)
@@ -1663,3 +1663,94 @@ void dcn401_hardware_release(struct dc *dc)
        }
 }
 
+/*
+ * Wait until every DPP pipe driven by the given OTG_MASTER pipe has had its
+ * DET size double-buffer update take effect in hardware (via the hubbub
+ * wait_for_det_update hook).  No-op if the OTG master has no stream.
+ */
+void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
+{
+       struct pipe_ctx *opp_heads[MAX_PIPES];
+       struct pipe_ctx *dpp_pipes[MAX_PIPES];
+       struct hubbub *hubbub = dc->res_pool->hubbub;
+       int dpp_count = 0;
+       /* Declared up front: kernel builds warn on declaration-after-statement */
+       int slice_count;
+
+       if (!otg_master->stream)
+               return;
+
+       slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+                       &context->res_ctx, opp_heads);
+
+       for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+               if (opp_heads[slice_idx]->plane_state) {
+                       dpp_count = resource_get_dpp_pipes_for_opp_head(
+                                       opp_heads[slice_idx],
+                                       &context->res_ctx,
+                                       dpp_pipes);
+                       for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+                               struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
+
+                               if (dpp_pipe && hubbub &&
+                                               dpp_pipe->plane_res.hubp &&
+                                               hubbub->funcs->wait_for_det_update)
+                                       hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
+                       }
+               }
+       }
+}
+
+/*
+ * Lock or unlock all enabled, non-phantom OTG_MASTER pipes.
+ *
+ * On unlock, pipes whose DET allocation shrank in the new context are
+ * released first and their DET double-buffer updates awaited, so the
+ * remaining pipes cannot transiently over-subscribe the DET.
+ */
+void dcn401_interdependent_update_lock(struct dc *dc,
+               struct dc_state *context, bool lock)
+{
+       unsigned int i = 0;
+       struct pipe_ctx *pipe = NULL;
+       struct timing_generator *tg = NULL;
+       bool pipe_unlocked[MAX_PIPES] = {0};
+
+       if (lock) {
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       pipe = &context->res_ctx.pipe_ctx[i];
+                       tg = pipe->stream_res.tg;
+
+                       if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+                                       !tg->funcs->is_tg_enabled(tg) ||
+                                       dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+                               continue;
+                       dc->hwss.pipe_control_lock(dc, pipe, true);
+               }
+       } else {
+               /* Unlock pipes based on the change in DET allocation instead of pipe index
+                * Prevents over allocation of DET during unlock process
+                * e.g. 2 pipe config with different streams with a max of 20 DET segments
+                *      Before:                                                         After:
+                *              - Pipe0: 10 DET segments                        - Pipe0: 12 DET segments
+                *              - Pipe1: 10 DET segments                        - Pipe1: 8 DET segments
+                * If Pipe0 gets updated first, 22 DET segments will be allocated
+                */
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       /* Declared up front: kernel builds warn on declaration-after-statement */
+                       struct pipe_ctx *old_otg_master;
+                       int current_pipe_idx;
+
+                       pipe = &context->res_ctx.pipe_ctx[i];
+                       tg = pipe->stream_res.tg;
+                       current_pipe_idx = i;
+
+                       if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+                                       !tg->funcs->is_tg_enabled(tg) ||
+                                       dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+                               /* Nothing to unlock here; mark done so the second pass skips it */
+                               pipe_unlocked[i] = true;
+                               continue;
+                       }
+
+                       // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared
+                       old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);
+                       if (old_otg_master)
+                               current_pipe_idx = old_otg_master->pipe_idx;
+
+                       /* DET shrank for this stream: safe to unlock now, then wait for
+                        * the hardware to latch the smaller allocation before others unlock
+                        */
+                       if (resource_calculate_det_for_stream(context, pipe) <
+                                       resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
+                               dc->hwss.pipe_control_lock(dc, pipe, false);
+                               pipe_unlocked[i] = true;
+                               dcn401_wait_for_det_buffer_update(dc, context, pipe);
+                       }
+               }
+
+               /* Second pass: unlock everything that kept or grew its DET allocation */
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       if (pipe_unlocked[i])
+                               continue;
+                       pipe = &context->res_ctx.pipe_ctx[i];
+                       dc->hwss.pipe_control_lock(dc, pipe, false);
+               }
+       }
+}
index 8e9c1c17aa6627b517adb1de91337b39a3045462..3ecb1ebffcee87f5724766d3b8f73494b123bbe6 100644 (file)
@@ -81,4 +81,6 @@ void dcn401_hardware_release(struct dc *dc);
 void dcn401_update_odm(struct dc *dc, struct dc_state *context,
                struct pipe_ctx *otg_master);
 void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
+void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
+void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
 #endif /* __DC_HWSS_DCN401_H__ */
index f4eda4a55ea72f9eca055bd63d8f3a008e8eb98e..b5f63675afcb588c416157b58b3e5ddc023051ca 100644 (file)
@@ -38,7 +38,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
        .disable_audio_stream = dce110_disable_audio_stream,
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
-       .interdependent_update_lock = dcn32_interdependent_update_lock,
+       .interdependent_update_lock = dcn401_interdependent_update_lock,
        .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn401_prepare_bandwidth,
        .optimize_bandwidth = dcn401_optimize_bandwidth,
index dd2b2864876c799bbc8cc4a95e8524574bda568a..67c32401893e862949c97c5d18ae5b3c69c83f8e 100644 (file)
@@ -227,6 +227,7 @@ struct hubbub_funcs {
        void (*get_mall_en)(struct hubbub *hubbub, unsigned int *mall_in_use);
        void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg);
        void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
+       void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
 };
 
 struct hubbub {
index 96d40d33a1f99e550aa867e4d4f358231aac94ef..9cd80d3864c7b45679de64e8764f10fd203684ee 100644 (file)
@@ -639,4 +639,9 @@ struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx);
  * @dml2_options: struct to hold callbacks
  */
 void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options);
+
+/*
+ * Calculate the total DET allocated for all pipes of a given OTG_MASTER pipe
+ */
+int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master);
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */