www.infradead.org Git - users/dwmw2/linux.git/commitdiff
drm/amd/display: Detect and disallow idle reallow during reentrancy
author: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Mon, 26 Feb 2024 19:29:05 +0000 (14:29 -0500)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 22 Mar 2024 19:49:29 +0000 (15:49 -0400)
[Why]
Cursor updates can be preempted by queued flips in some DMs.

The synchronization model causes this to occur within the same thread
at an intermediate level when we insert logs into the OS queue.

Since this occurs on the same thread and we're still holding the lock
(recursively), the cache is coherent.

The exit sequence will run twice since we technically haven't finished
the exit the first time, so we need a way to detect and avoid the
reallow in the middle of this call to prevent the hang on the cursor
update that was preempted.

[How]
Keep a counter that tracks the depth of the exit calls. Do not reallow
until the counter is zero.

Reviewed-by: Duncan Ma <duncan.ma@amd.com>
Acked-by: Wayne Lin <wayne.lin@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h

index f796ed06187920c4f2d434e332cd7a1dacb851e6..4878e9e50440523e2d5197bdb7fff6412ab2f54d 100644 (file)
@@ -1437,6 +1437,8 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
         */
 
        if (!allow_idle) {
+               dc_dmub_srv->idle_exit_counter += 1;
+
                dc_dmub_srv_exit_low_power_state(dc);
                /*
                 * Idle is considered fully exited only after the sequence above
@@ -1448,6 +1450,12 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
                 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
                 */
                dc_dmub_srv->idle_allowed = false;
+
+               dc_dmub_srv->idle_exit_counter -= 1;
+               if (dc_dmub_srv->idle_exit_counter < 0) {
+                       ASSERT(0);
+                       dc_dmub_srv->idle_exit_counter = 0;
+               }
        } else {
                /* Consider idle as notified prior to the actual submission to
                 * prevent multiple entries. */
@@ -1489,7 +1497,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
        else
                result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
 
-       if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+       if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+           !ctx->dc->debug.disable_dmub_reallow_idle)
                dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
 
        return result;
@@ -1538,7 +1547,8 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
 
        result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
 
-       if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+       if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+           !ctx->dc->debug.disable_dmub_reallow_idle)
                dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
 
        return result;
index 60c93e9e353305f9a751001c7beb6f5610644b3c..c0a512a12531690632af8e5ad97f68d1dc2b6d9c 100644 (file)
@@ -51,6 +51,7 @@ struct dc_dmub_srv {
        struct dc_context *ctx;
        void *dm;
 
+       int32_t idle_exit_counter;
        bool idle_allowed;
        bool needs_idle_wake;
 };