add the force_clk_levels sysfs interface for navi10.
Signed-off-by: Kevin Wang <kevin1.wang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
                return ret;
 
        if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+               ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
                return ret;
 
        if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+               ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
                return ret;
 
        if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+               ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
                return ret;
 
        if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+               ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
                return ret;
 
        if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+               ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
                return ret;
 
        if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+               ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
        }
 
        if (is_support_sw_smu(adev)) {
-               value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
+               value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_sclk_od)
                        amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
        }
 
        if (is_support_sw_smu(adev)) {
-               value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
+               value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_mclk_od)
                        amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
        int (*set_power_state)(struct smu_context *smu);
        int (*populate_umd_state_clk)(struct smu_context *smu);
        int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
-       int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask);
+       int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
        int (*set_default_od8_settings)(struct smu_context *smu);
        int (*update_specified_od8_value)(struct smu_context *smu,
                                          uint32_t index,
        ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
 #define smu_print_clk_levels(smu, clk_type, buf) \
        ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, type, level) \
-       ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (type), (level)) : 0)
+#define smu_force_clk_levels(smu, clk_type, level) \
+       ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
 #define smu_get_od_percentage(smu, type) \
        ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
 #define smu_set_od_percentage(smu, type, value) \
 
        return size;
 }
 
+/*
+ * navi10_force_clk_levels - restrict a DPM clock domain to the range of
+ * levels selected in @mask.
+ *
+ * @smu:      SMU context
+ * @clk_type: which clock domain to constrain (SMU_GFXCLK, SMU_SOCCLK,
+ *            SMU_MCLK, SMU_UCLK, SMU_DCEFCLK, SMU_FCLK); any other type
+ *            is silently ignored
+ * @mask:     bitmask of allowed DPM levels; the lowest and highest set
+ *            bits become the soft min/max level
+ *
+ * Return: 0 on success. NOTE(review): on failure this returns @size,
+ * which is always 0 here, so errors from the freq lookup / soft-range
+ * calls are swallowed — confirm this is intentional.
+ */
+static int navi10_force_clk_levels(struct smu_context *smu,
+                                  enum smu_clk_type clk_type, uint32_t mask)
+{
+
+       int ret = 0, size = 0;
+       uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+
+       /* Lowest set bit -> minimum level, highest set bit -> maximum level.
+        * An empty mask degenerates to level 0 for both bounds.
+        */
+       soft_min_level = mask ? (ffs(mask) - 1) : 0;
+       soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SOCCLK:
+       case SMU_MCLK:
+       case SMU_UCLK:
+       case SMU_DCEFCLK:
+       case SMU_FCLK:
+               /* Translate the level indices into frequencies, then clamp
+                * the domain's soft frequency range to [min_freq, max_freq].
+                */
+               ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+               if (ret)
+                       return size;
+
+               ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+               if (ret)
+                       return size;
+
+               ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+               if (ret)
+                       return size;
+               break;
+       default:
+               /* Unsupported clock domains are a no-op, not an error. */
+               break;
+       }
+
+       return size;
+}
+
+
 static const struct pptable_funcs navi10_ppt_funcs = {
        .tables_init = navi10_tables_init,
        .alloc_dpm_context = navi10_allocate_dpm_context,
        .dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
        .get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
        .print_clk_levels = navi10_print_clk_levels,
+       .force_clk_levels = navi10_force_clk_levels,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
 
 }
 
 static int vega20_force_clk_levels(struct smu_context *smu,
-                       enum pp_clock_type type, uint32_t mask)
+                       enum  smu_clk_type clk_type, uint32_t mask)
 {
        struct vega20_dpm_table *dpm_table;
        struct vega20_single_dpm_table *single_dpm_table;
 
        dpm_table = smu->smu_dpm.dpm_context;
 
-       switch (type) {
-       case PP_SCLK:
+       switch (clk_type) {
+       case SMU_SCLK:
                single_dpm_table = &(dpm_table->gfx_table);
 
                if (soft_max_level >= single_dpm_table->count) {
 
                break;
 
-       case PP_MCLK:
+       case SMU_MCLK:
                single_dpm_table = &(dpm_table->mem_table);
 
                if (soft_max_level >= single_dpm_table->count) {
 
                break;
 
-       case PP_SOCCLK:
+       case SMU_SOCCLK:
                single_dpm_table = &(dpm_table->soc_table);
 
                if (soft_max_level >= single_dpm_table->count) {
 
                break;
 
-       case PP_FCLK:
+       case SMU_FCLK:
                single_dpm_table = &(dpm_table->fclk_table);
 
                if (soft_max_level >= single_dpm_table->count) {
 
                break;
 
-       case PP_DCEFCLK:
+       case SMU_DCEFCLK:
                hard_min_level = soft_min_level;
                single_dpm_table = &(dpm_table->dcef_table);
 
 
                break;
 
-       case PP_PCIE:
+       case SMU_PCIE:
                if (soft_min_level >= NUM_LINK_LEVELS ||
                    soft_max_level >= NUM_LINK_LEVELS) {
                        ret = -EINVAL;