DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
        }
 
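+       /*
+        * Spawn the periodic detection worker only when IPS is supported
+        * and not disabled (disable_ips == DMUB_IPS_ENABLE means enabled).
+        */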
+       if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
+               adev->dm.idle_workqueue = idle_create_workqueue(adev);
+
        if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
                adev->dm.vblank_control_workqueue = NULL;
        }
 
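+       /* Stop the periodic detection worker, if any, and free its data. */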
+       if (adev->dm.idle_workqueue) {
+               if (adev->dm.idle_workqueue->running) {
+                       adev->dm.idle_workqueue->enable = false;
+                       flush_work(&adev->dm.idle_workqueue->work);
+               }
+
+               kfree(adev->dm.idle_workqueue);
+               adev->dm.idle_workqueue = NULL;
+       }
+
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 
        bool enable;
 };
 
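+/**
+ * struct idle_workqueue - Work data for periodic detection
+ * @work: work item scheduled while PSR is active
+ * @dm: backpointer to the owning display manager
+ * @enable: keeps the detection loop running while set
+ * @running: set by the worker for the lifetime of one detection loop
+ */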
+struct idle_workqueue {
+       struct work_struct work;
+       struct amdgpu_display_manager *dm;
+       bool enable;
+       bool running;
+};
+
 /**
  * struct amdgpu_dm_backlight_caps - Information about backlight
  *
         * Deferred work for vblank control events.
         */
        struct workqueue_struct *vblank_control_workqueue;
+       struct idle_workqueue *idle_workqueue;
 
        struct drm_atomic_state *cached_state;
        struct dc_state *cached_dc_state;
                                             struct drm_crtc *crtc);
 
 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
+struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev);
 #endif /* __AMDGPU_DM_H__ */
 
 #include "amdgpu_dm_trace.h"
 #include "amdgpu_dm_debugfs.h"
 
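+/*
+ * Interval between periodic detection cycles and the length of each
+ * detection window, both in microseconds.
+ */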
+#define HPD_DETECTION_PERIOD_uS 5000000
+#define HPD_DETECTION_TIME_uS 1000
+
 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
 {
        struct drm_crtc *crtc = &acrtc->base;
                struct amdgpu_dm_connector *aconn =
                        (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
 
-               if (!aconn->disallow_edp_enter_psr)
+               if (!aconn->disallow_edp_enter_psr) {
+                       struct amdgpu_display_manager *dm = vblank_work->dm;
+
                        amdgpu_dm_psr_enable(vblank_work->stream);
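+                       /*
+                        * Kick the periodic detection worker if idle
+                        * optimizations are in use and it is not running yet.
+                        */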
+                       if (dm->idle_workqueue &&
+                           dm->dc->idle_optimizations_allowed &&
+                           dm->idle_workqueue->enable &&
+                           !dm->idle_workqueue->running)
+                               schedule_work(&dm->idle_workqueue->work);
+               }
        }
 }
 
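+/*
+ * Periodically exit idle optimizations for a short detection window and
+ * re-enter them afterwards. The loop runs while the work is enabled,
+ * idle optimizations are allowed, and PSR may stay active.
+ */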
+static void amdgpu_dm_idle_worker(struct work_struct *work)
+{
+       struct idle_workqueue *idle_work;
+
+       idle_work = container_of(work, struct idle_workqueue, work);
+       idle_work->running = true;
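+       /* Wait one full period before the first detection cycle. */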
+       fsleep(HPD_DETECTION_PERIOD_uS);
+       mutex_lock(&idle_work->dm->dc_lock);
+       while (idle_work->enable) {
+               if (!idle_work->dm->dc->idle_optimizations_allowed)
+                       break;
+
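+               /* Exit idle optimizations to open a detection window. */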
+               dc_allow_idle_optimizations(idle_work->dm->dc, false);
+
+               mutex_unlock(&idle_work->dm->dc_lock);
+               fsleep(HPD_DETECTION_TIME_uS);
+               mutex_lock(&idle_work->dm->dc_lock);
+
+               if (!amdgpu_dm_psr_is_active_allowed(idle_work->dm))
+                       break;
+
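+               /* PSR may stay active: re-enter idle optimizations. */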
+               dc_allow_idle_optimizations(idle_work->dm->dc, true);
+               mutex_unlock(&idle_work->dm->dc_lock);
+               fsleep(HPD_DETECTION_PERIOD_uS);
+               mutex_lock(&idle_work->dm->dc_lock);
+       }
+       mutex_unlock(&idle_work->dm->dc_lock);
+       idle_work->running = false;
+}
+
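+/**
+ * idle_create_workqueue() - allocate and initialize the periodic detection work
+ * @adev: amdgpu device
+ *
+ * The work item is only initialized here; scheduling happens later, once
+ * PSR has been enabled.
+ *
+ * Return: pointer to the new idle_workqueue, or NULL on allocation failure.
+ */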
+struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev)
+{
+       struct idle_workqueue *idle_work;
+
+       idle_work = kzalloc(sizeof(*idle_work), GFP_KERNEL);
+       if (!idle_work)
+               return NULL;
+
+       idle_work->dm = &adev->dm;
+       idle_work->enable = false;
+       idle_work->running = false;
+       INIT_WORK(&idle_work->work, amdgpu_dm_idle_worker);
+
+       return idle_work;
+}
+
 static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 {
        struct vblank_control_work *vblank_work =
 
        return dc_set_psr_allow_active(dm->dc, false);
 }
 
+/**
+ * amdgpu_dm_psr_is_active_allowed() - check if PSR may stay active on any stream
+ * @dm: pointer to amdgpu_display_manager
+ *
+ * Return: true if PSR is enabled and allowed to be active on at least one
+ * stream in the current DC state, false otherwise.
+ */
+bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
+{
+       unsigned int i;
+       bool allow_active = false;
+
+       for (i = 0; i < dm->dc->current_state->stream_count; i++) {
+               struct dc_link *link;
+               struct dc_stream_state *stream = dm->dc->current_state->streams[i];
+
+               link = stream->link;
+               if (!link)
+                       continue;
+               if (link->psr_settings.psr_feature_enabled &&
+                   link->psr_settings.psr_allow_active) {
+                       allow_active = true;
+                       break;
+               }
+       }
+
+       return allow_active;
+}