mutex_unlock(&adev->dm.dc_lock);
 }
 
+/*
+ * Exit IPS before a register access, but only when needed: if a DMUB
+ * service exists and its idle_exit_counter is zero (no exit already
+ * outstanding), call dc_exit_ips_for_hw_access(). Otherwise this is a
+ * no-op, so the register read/write helpers can call it unconditionally.
+ *
+ * NOTE(review): IPS presumably means the display idle power state that
+ * must be left before MMIO is safe — confirm against dc/dmub_srv docs.
+ */
+static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+{
+       if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+               dc_exit_ips_for_hw_access(dc);
+}
+
 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
                       u32 value, const char *func_name)
 {
                return;
        }
 #endif
+
+       amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
        cgs_write_register(ctx->cgs_device, address, value);
        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
 }
                return 0;
        }
 
+       amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
+
        value = cgs_read_register(ctx->cgs_device, address);
 
        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);