        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
-                                                amdgpu_crtc->pflip_status,
-                                                AMDGPU_FLIP_SUBMITTED,
-                                                amdgpu_crtc->crtc_id,
-                                                amdgpu_crtc);
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+                            amdgpu_crtc->pflip_status,
+                            AMDGPU_FLIP_SUBMITTED,
+                            amdgpu_crtc->crtc_id,
+                            amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
 
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 
-       pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+       pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
 
                DP_TEST_RESPONSE,
                &test_response.raw,
                sizeof(test_response));
-       }
-       else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+       } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                /* offload_work->data is from handle_hpd_rx_irq->
        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
 
-       if(amdgpu_dm_irq_init(adev)) {
+       if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }
        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;
 
-       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+       if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;
-       }
 
        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
        mutex_destroy(&adev->dm.dpia_aux_lock);
-
-       return;
 }
 
 static int load_dmcu_fw(struct amdgpu_device *adev)
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;
 
-       switch(adev->asic_type) {
+       switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
-       } * bundle;
+       } *bundle;
        int k, m;
 
        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 
 cleanup:
        kfree(bundle);
-
-       return;
 }
 
 static int dm_resume(void *handle)
        .set_powergating_state = dm_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
        caps->aux_support = false;
 
-       if (caps->ext_caps->bits.oled == 1 /*||
-           caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-           caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+       if (caps->ext_caps->bits.oled == 1
+           /*
+            * ||
+            * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+            * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+            */)
                caps->aux_support = true;
 
        if (amdgpu_backlight == 0)
                process_count < max_process_count) {
                u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
                u8 retry;
+
                dret = 0;
 
                process_count++;
                aconnector = to_amdgpu_dm_connector(connector);
                dc_link = aconnector->dc_link;
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+               if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
                        int_params.irq_source = dc_link->irq_source_hpd;
 
                                        (void *) aconnector);
                }
 
-               if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+               if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 
                        /* Also register for DP short pulse (hpd_rx). */
                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
-       unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+       unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-        *    for acknowledging and handling. */
+        *    for acknowledging and handling.
+        */
 
        /* Use VBLANK interrupt */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
 
                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
-                       dc_interrupt_to_irq_source(dc, i+1 , 0);
+                       dc_interrupt_to_irq_source(dc, i + 1, 0);
 
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
 
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
-       unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+       unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
        if (adev->family >= AMDGPU_FAMILY_AI)
                client_id = SOC15_IH_CLIENTID_DCE;
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-        *    for acknowledging and handling. */
+        *    for acknowledging and handling.
+        */
 
        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
 }
 
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
-                               unsigned *min, unsigned *max)
+                               unsigned int *min, unsigned int *max)
 {
        if (!caps)
                return 0;
 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
                                        uint32_t brightness)
 {
-       unsigned min, max;
+       unsigned int min, max;
 
        if (!get_brightness_range(caps, &min, &max))
                return brightness;
 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
                                      uint32_t brightness)
 {
-       unsigned min, max;
+       unsigned int min, max;
 
        if (!get_brightness_range(caps, &min, &max))
                return brightness;
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
        drm_atomic_private_obj_fini(&dm->atomic_obj);
-       return;
 }
 
 /******************************************************************************
 {
        enum dc_color_depth depth = timing_out->display_color_depth;
        int normalized_clk;
+
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
 {
        struct dc_sink_init_data sink_init_data = { 0 };
        struct dc_sink *sink = NULL;
+
        sink_init_data.link = aconnector->dc_link;
        sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
 
                return &aconnector->freesync_vid_base;
 
        /* Find the preferred mode */
-       list_for_each_entry (m, list_head, head) {
+       list_for_each_entry(m, list_head, head) {
                if (m->type & DRM_MODE_TYPE_PREFERRED) {
                        m_pref = m;
                        break;
         * For some monitors, preferred mode is not the mode with highest
         * supported refresh rate.
         */
-       list_for_each_entry (m, list_head, head) {
+       list_for_each_entry(m, list_head, head) {
                current_refresh  = drm_mode_vrefresh(m);
 
                if (m->hdisplay == m_pref->hdisplay &&
                 * This may not be an error, the use case is when we have no
                 * usermode calls to reset and set mode upon hotplug. In this
                 * case, we call set mode ourselves to restore the previous mode
-                * and the modelist may not be filled in in time.
+                * and the modelist may not be filled in time.
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
                drm_mode_set_crtcinfo(&mode, 0);
 
        /*
-       * If scaling is enabled and refresh rate didn't change
-       * we copy the vic and polarities of the old timings
-       */
+        * If scaling is enabled and refresh rate didn't change
+        * we copy the vic and polarities of the old timings
+        */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
 
        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
+
                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
                          aconnector->force_yuv420_output;
                color_depth = convert_color_depth_from_display_info(connector,
 {
        struct drm_display_mode *m;
 
-       list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+       list_for_each_entry(m, &aconnector->base.probed_modes, head) {
                if (drm_mode_equal(m, mode))
                        return true;
        }
 
        link->priv = aconnector;
 
-       DRM_DEBUG_DRIVER("%s()\n", __func__);
-
        i2c = create_i2c(link->ddc, link->link_index, &res);
        if (!i2c) {
                         * DRI3/Present extension with defined target_msc.
                         */
                        last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
-               }
-               else {
+               } else {
                        /* For variable refresh rate mode only:
                         * Get vblank of last completed flip to avoid > 1 vrr
                         * flips per video frame by use of throttling, but allow
                dc_resource_state_copy_construct_current(dm->dc, dc_state);
        }
 
-       for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
-                                      new_crtc_state, i) {
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+                                     new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
                drm_dbg_state(state->dev,
-                       "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-                       "planes_changed:%d, mode_changed:%d,active_changed:%d,"
-                       "connectors_changed:%d\n",
+                       "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                                        &commit->flip_done, 10*HZ);
 
                if (ret == 0)
-                       DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
-                                 "timed out\n", crtc->base.id, crtc->name);
+                       DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+                                 crtc->base.id, crtc->name);
 
                drm_crtc_commit_put(commit);
        }
        return false;
 }
 
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
        u64 num, den, res;
        struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 
                goto skip_modeset;
 
        drm_dbg_state(state->dev,
-               "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-               "planes_changed:%d, mode_changed:%d,active_changed:%d,"
-               "connectors_changed:%d\n",
+               "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER(
-                               "Mode change not required for front porch change, "
-                               "setting mode_changed to %d",
+                               "Mode change not required for front porch change, setting mode_changed to %d",
                                new_crtc_state->mode_changed);
 
                        set_freesync_fixed_config(dm_new_crtc_state);
                        struct drm_display_mode *high_mode;
 
                        high_mode = get_highest_refresh_rate_mode(aconnector, false);
-                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
                                set_freesync_fixed_config(dm_new_crtc_state);
-                       }
                }
 
                ret = dm_atomic_get_state(state, &dm_state);
         */
        for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;
+
                if (other->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
        }
 
        /* Core DRM takes care of checking FB modifiers, so we only need to
-        * check tiling flags when the FB doesn't have a modifier. */
+        * check tiling flags when the FB doesn't have a modifier.
+        */
        if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
                if (adev->family < AMDGPU_FAMILY_AI) {
                        linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
-                                AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+                                AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
                                 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
                } else {
                        linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
        /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe but it's going to inherit the scaling and
         * positioning from the underlying pipe. Check the cursor plane's
-        * blending properties match the underlying planes'. */
+        * blending properties match the underlying planes'.
+        */
 
        new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-       if (!new_cursor_state || !new_cursor_state->fb) {
+       if (!new_cursor_state || !new_cursor_state->fb)
                return 0;
-       }
 
        dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
        cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
        struct drm_connector_state *conn_state, *old_conn_state;
        struct amdgpu_dm_connector *aconnector = NULL;
        int i;
+
        for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
                if (!conn_state->crtc)
                        conn_state = old_conn_state;
        }
 
        /* Store the overall update type for use later in atomic check. */
-       for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct dm_crtc_state *dm_new_crtc_state =
                        to_dm_crtc_state(new_crtc_state);
 
        else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
                DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
        else
-               DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+               DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
 
        trace_amdgpu_dm_atomic_check_finish(state, ret);