        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
 
+       /* Control PSR based on vblank requirements from OS */
+       if (vblank_work->stream && vblank_work->stream->link) {
+               if (vblank_work->enable) {
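+                       /* Vblank is being enabled: make sure PSR is not active. */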
+                       if (vblank_work->stream->link->psr_settings.psr_allow_active)
+                               amdgpu_dm_psr_disable(vblank_work->stream);
+               } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+                          !vblank_work->stream->link->psr_settings.psr_allow_active &&
+                          vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
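+                       /* Vblank is being disabled and the flip path allowed PSR entry; enable PSR. */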
+                       amdgpu_dm_psr_enable(vblank_work->stream);
+               }
+       }
+
        mutex_unlock(&dm->dc_lock);
+
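+       /* Drop the stream reference taken when this work item was queued. */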
+       dc_stream_release(vblank_work->stream);
+
        kfree(vblank_work);
 }
 
        work->acrtc = acrtc;
        work->enable = enable;
 
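+       /* Hold a reference on the stream; the vblank work item releases it when done. */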
+       if (acrtc_state->stream) {
+               dc_stream_retain(acrtc_state->stream);
+               work->stream = acrtc_state->stream;
+       }
+
        queue_work(dm->vblank_control_workqueue, &work->work);
 #endif
 
        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
+               /*
+                * If PSR or idle optimizations are enabled then flush out
+                * any pending work before hardware programming.
+                */
+               flush_workqueue(dm->vblank_control_workqueue);
+
                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
                                acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
                                !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
-               else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-                               acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
-                               !acrtc_state->stream->link->psr_settings.psr_allow_active) {
-                       struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-                                       acrtc_state->stream->dm_stream_context;
+
+               /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+               if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+                   acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+                       struct amdgpu_dm_connector *aconn =
+                               (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
 
                        if (aconn->psr_skip_count > 0)
                                aconn->psr_skip_count--;
-                       else
-                               amdgpu_dm_psr_enable(acrtc_state->stream);
+
+                       /* Allow PSR when skip count is 0. */
+                       acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+               } else {
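+                       /* Non-fast update or PSR not enabled: disallow PSR entry. */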
+                       acrtc_attach->dm_irq_params.allow_psr_entry = false;
                }
 
                mutex_unlock(&dm->dc_lock);
 
        if (dc_state) {
                /* if there is a mode set or reset, disable eDP PSR */
-               if (mode_set_reset_required)
+               if (mode_set_reset_required) {
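+                       /* Let any queued vblank/PSR work finish before disabling PSR for the mode set. */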
+                       flush_workqueue(dm->vblank_control_workqueue);
                        amdgpu_dm_psr_disable_all(dm);
+               }
 
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);