                        mask = 1;
                        cu_bitmap = 0;
                        counter = 0;
-                       gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+                       amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
 
                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
                                if (cu_info->bitmap[i][j] & mask) {
                        cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
                }
        }
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 }
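
/*
 * A standalone sketch (not driver code) of the mask walk used in the hunk
 * above: for one SE/SH the CU bitmap is tested bit by bit, and set bits are
 * folded into an accumulated mask and a running count. The always-on
 * selection policy is not visible in this excerpt; the sample bitmap and CU
 * count below are made-up values for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bitmap = 0x2fb;		/* hypothetical per-SH CU bitmap */
	unsigned int max_cu_per_sh = 10;	/* hypothetical CU count */
	uint32_t mask = 1, cu_bitmap = 0;
	unsigned int k, counter = 0;

	for (k = 0; k < max_cu_per_sh; k++) {
		if (bitmap & mask) {
			cu_bitmap |= mask;
			counter++;
		}
		mask <<= 1;
	}
	printf("active CUs: %u, bitmap: 0x%x\n", counter, cu_bitmap);
	return 0;
}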
 
 
        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
 
        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
 
        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
 
        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+                       amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
                        data = gfx_v9_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
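
/*
 * A standalone sketch (not driver code) of the packing in the hunk above:
 * each SE/SH contributes rb_bitmap_width_per_sh bits, shifted into one
 * active-RB mask at a position derived from (se, sh). The engine counts and
 * per-SH bitmaps below are made-up values for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int max_se = 4, max_sh_per_se = 1, rb_bitmap_width_per_sh = 4;
	uint32_t per_sh_rb[4] = { 0xf, 0xf, 0x3, 0xf };	/* hypothetical reads */
	uint32_t active_rbs = 0;
	unsigned int i, j;

	for (i = 0; i < max_se; i++)
		for (j = 0; j < max_sh_per_se; j++)
			active_rbs |= per_sh_rb[i * max_sh_per_se + j] <<
				((i * max_sh_per_se + j) * rb_bitmap_width_per_sh);

	printf("backend_enable_mask: 0x%x\n", active_rbs);
	return 0;
}
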
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+                       amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
                        for (k = 0; k < adev->usec_timeout; k++) {
                                if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
                                        break;
                                udelay(1);
                        }
                        if (k == adev->usec_timeout) {
-                               gfx_v9_0_select_se_sh(adev, 0xffffffff,
+                               amdgpu_gfx_select_se_sh(adev, 0xffffffff,
                                                      0xffffffff, 0xffffffff);
                                mutex_unlock(&adev->grbm_idx_mutex);
                                DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
                                         i, j);
                                return;
                        }
                }
        }
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
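
/*
 * A standalone sketch (not driver code) of the bounded poll in the hunk
 * above: read a busy flag up to usec_timeout times with a 1us delay between
 * reads, and treat k == usec_timeout as the timeout case. read_serdes_busy()
 * is a hypothetical stand-in for the RREG32_SOC15(GC, 0,
 * mmRLC_SERDES_CU_MASTER_BUSY) read, and usleep() stands in for udelay().
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

static uint32_t read_serdes_busy(unsigned int attempt)
{
	return attempt < 3 ? 1 : 0;	/* pretend the block goes idle after 3 reads */
}

int main(void)
{
	unsigned int usec_timeout = 100000, k;

	for (k = 0; k < usec_timeout; k++) {
		if (read_serdes_busy(k) == 0)
			break;
		usleep(1);
	}
	if (k == usec_timeout)
		printf("Timeout wait for RLC serdes\n");
	else
		printf("serdes idle after %u polls\n", k);
	return 0;
}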
 
        mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
                RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
                RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
                RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
        for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
                for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
                        for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
-                               gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+                               amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
                                RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
                        }
                }
        for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
                for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
                        for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
-                               gfx_v9_0_select_se_sh(adev, j, 0, k);
+                               amdgpu_gfx_select_se_sh(adev, j, 0, k);
                                reg_value =
                                        RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
                                if (reg_value)
        err_data->ce_count += sec_count;
        err_data->ue_count += ded_count;
 
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        gfx_v9_0_query_utc_edc_status(adev, err_data);
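
/*
 * A standalone sketch (not driver code) of the EDC counter walk in the hunks
 * above: every table entry is read once per (se, instance) pair and non-zero
 * values are folded into correctable/uncorrectable totals. The table, the
 * read helper and the SEC/DED bit layout are hypothetical stand-ins for
 * gfx_v9_0_edc_counter_regs and the SOC15 register accessors; the real driver
 * decodes each register through its own per-field masks.
 */
#include <stdio.h>
#include <stdint.h>

struct edc_reg { unsigned int se_num, instance; };

static uint32_t read_edc_counter(unsigned int reg, unsigned int se, unsigned int inst)
{
	return (reg == 1 && se == 0 && inst == 2) ? 0x00010002 : 0;	/* fake data */
}

int main(void)
{
	struct edc_reg regs[] = { { 1, 1 }, { 4, 4 }, { 2, 8 } };	/* hypothetical */
	unsigned int sec_count = 0, ded_count = 0, i, j, k;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		for (j = 0; j < regs[i].se_num; j++) {
			for (k = 0; k < regs[i].instance; k++) {
				uint32_t v = read_edc_counter(i, j, k);

				if (v) {
					sec_count += v & 0xffff;	/* assumed SEC field */
					ded_count += v >> 16;		/* assumed DED field */
				}
			}
		}
	}
	printf("ce_count: %u, ue_count: %u\n", sec_count, ded_count);
	return 0;
}
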
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
-                       gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+                       amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
                        gfx_v9_0_set_user_cu_inactive_bitmap(
                                adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
                        bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
                        cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
                }
        }
-       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        cu_info->number = active_cu_number;