]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
drm/amd/display: fix dmub access race condition
authorAurabindo Pillai <aurabindo.pillai@amd.com>
Mon, 21 Jul 2025 15:03:39 +0000 (11:03 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 4 Aug 2025 18:38:34 +0000 (14:38 -0400)
Accessing DC from amdgpu_dm is usually preceded by acquisition of the
dc_lock mutex. Most of the DC APIs that DM calls are protected by a DC
lock. However, there are a few that are not. Some DC APIs called from
interrupt context end up sending DMUB commands via a DC API while other
threads were using DMUB. This was apparent from a race between calls for
setting idle optimization enable/disable and the DC API to set vmin/vmax.

Offload the call to dc_stream_adjust_vmin_vmax() to a worker thread
instead of calling it directly from the interrupt handler, so that the
call can safely wait for dc_lock.

Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

index c71167ffdb76b98f14f72dc4ce41098aab4ea06c..6762fc7de7692a49a15e9d25adb02c47ef925b3e 100644 (file)
@@ -530,6 +530,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
                      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
 }
 
+/*
+ * dm_handle_vmin_vmax_update - deferred worker that applies a vmin/vmax
+ * timing adjustment under dc_lock.
+ * @offload_work: embedded work_struct of a struct vupdate_offload_work
+ *
+ * Runs in process context, so it may sleep on dc_lock; this serializes
+ * the DMUB access against other DC API users. Consumes the stream
+ * reference and the adjust copy owned by the work item, then frees the
+ * work item itself.
+ */
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+       struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+       struct amdgpu_device *adev = work->adev;
+       struct dc_stream_state *stream = work->stream;
+       struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+       /* Take dc_lock so the DMUB command cannot race other DC calls. */
+       mutex_lock(&adev->dm.dc_lock);
+       dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+       mutex_unlock(&adev->dm.dc_lock);
+
+       /* Drop the reference taken when the work was scheduled. */
+       dc_stream_release(stream);
+       kfree(work->adjust);
+       kfree(work);
+}
+
+/*
+ * schedule_dc_vmin_vmax - queue a vmin/vmax adjustment to a worker
+ * @adev: amdgpu device back pointer stored in the work item
+ * @stream: DC stream to adjust; a reference is retained here and
+ *          released by the worker
+ * @adjust: timing adjust to apply; deep-copied, caller keeps ownership
+ *
+ * Called from the vupdate/vblank interrupt handlers (in the vupdate path
+ * while event_lock is held), so allocations must not sleep: GFP_ATOMIC
+ * is required here, not GFP_KERNEL. On allocation failure the update is
+ * dropped with only a debug message, matching the best-effort nature of
+ * the per-frame adjustment.
+ */
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+       struct dc_stream_state *stream,
+       struct dc_crtc_timing_adjust *adjust)
+{
+       struct vupdate_offload_work *offload_work;
+       struct dc_crtc_timing_adjust *adjust_copy;
+
+       /* Atomic context: interrupt handler, possibly under a spinlock. */
+       offload_work = kzalloc(sizeof(*offload_work), GFP_ATOMIC);
+       if (!offload_work) {
+               drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+               return;
+       }
+
+       adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_ATOMIC);
+       if (!adjust_copy) {
+               drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+               kfree(offload_work);
+               return;
+       }
+
+       /* Keep the stream alive until the worker has consumed it. */
+       dc_stream_retain(stream);
+       memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+       INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+       offload_work->adev = adev;
+       offload_work->stream = stream;
+       offload_work->adjust = adjust_copy;
+
+       queue_work(system_wq, &offload_work->work);
+}
+
 static void dm_vupdate_high_irq(void *interrupt_params)
 {
        struct common_irq_params *irq_params = interrupt_params;
@@ -579,10 +623,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);
 
-                               dc_stream_adjust_vmin_vmax(
-                                   adev->dm.dc,
-                                   acrtc->dm_irq_params.stream,
-                                   &acrtc->dm_irq_params.vrr_params.adjust);
+                               schedule_dc_vmin_vmax(adev,
+                                       acrtc->dm_irq_params.stream,
+                                       &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
@@ -672,8 +715,8 @@ static void dm_crtc_high_irq(void *interrupt_params)
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);
 
-               dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
-                                          &acrtc->dm_irq_params.vrr_params.adjust);
+               schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+                               &acrtc->dm_irq_params.vrr_params.adjust);
        }
 
        /*
index 67c3a7a967f2f20d06f0bb7a4a56d2b8c1bc849f..94f312bae9ac194cea6bbbac7c14dbc213f94720 100644 (file)
@@ -153,6 +153,20 @@ struct idle_workqueue {
        bool running;
 };
 
+/**
+ * struct vupdate_offload_work - Work data for offloading a vmin/vmax
+ *                               adjustment from interrupt context
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc; retained while queued
+ * @adjust: Deep copy of the DC CRTC timing adjust to apply; freed by
+ *          the worker
+ */
+struct vupdate_offload_work {
+       struct work_struct work;
+       struct amdgpu_device *adev;
+       struct dc_stream_state *stream;
+       struct dc_crtc_timing_adjust *adjust;
+};
+
 #define MAX_LUMINANCE_DATA_POINTS 99
 
 /**