commit_params.streams = dc_state->streams;
                commit_params.stream_count = dc_state->stream_count;
+               dc_exit_ips_for_hw_access(dm->dc);
                WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
                dm_gpureset_commit_state(dm->cached_dc_state, dm);
                        emulated_link_detect(aconnector->dc_link);
                } else {
                        mutex_lock(&dm->dc_lock);
+                       dc_exit_ips_for_hw_access(dm->dc);
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
                        mutex_unlock(&dm->dc_lock);
                }
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+       struct dc *dc = aconnector->dc_link->ctx->dc;
        bool ret = false;
 
        if (adev->dm.disable_hpd_irq)
                        drm_kms_helper_connector_hotplug_event(connector);
        } else {
                mutex_lock(&adev->dm.dc_lock);
+               dc_exit_ips_for_hw_access(dc);
                ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
                mutex_unlock(&adev->dm.dc_lock);
                if (ret) {
        bool has_left_work = false;
        int idx = dc_link->link_index;
        struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+       struct dc *dc = aconnector->dc_link->ctx->dc;
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
                        bool ret = false;
 
                        mutex_lock(&adev->dm.dc_lock);
+                       dc_exit_ips_for_hw_access(dc);
                        ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
                        mutex_unlock(&adev->dm.dc_lock);
 
                        bool ret = false;
 
                        mutex_lock(&dm->dc_lock);
+                       dc_exit_ips_for_hw_access(dm->dc);
                        ret = dc_link_detect(link, DETECT_REASON_BOOT);
                        mutex_unlock(&dm->dc_lock);
 
 
                        memset(&position, 0, sizeof(position));
                        mutex_lock(&dm->dc_lock);
+                       dc_exit_ips_for_hw_access(dm->dc);
                        dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
                        mutex_unlock(&dm->dc_lock);
                }
 
        dm_enable_per_frame_crtc_master_sync(dc_state);
        mutex_lock(&dm->dc_lock);
+       dc_exit_ips_for_hw_access(dm->dc);
        WARN_ON(!dc_commit_streams(dm->dc, &params));
 
        /* Allow idle optimization when vblank count is 0 for display off */
 
 
                mutex_lock(&dm->dc_lock);
+               dc_exit_ips_for_hw_access(dm->dc);
                dc_update_planes_and_stream(dm->dc,
                                            dummy_updates,
                                            status->plane_count,