 	return ret;
 }
 
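+/*
+ * smu_dpm_set_umsch_mm_enable - gate (enable == false) or ungate
+ * (enable == true) the UMSCH MM IP block through the SMU, following
+ * the same pattern as the VCN/JPEG/VPE helpers in this file.
+ */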
+static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
+				       bool enable)
+{
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
+	int ret = 0;
+
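+	/*
+	 * Nothing to do if UMSCH MM is not enabled on this ASIC or the
+	 * SMU implementation does not expose the callback.
+	 */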
+	if (!smu->adev->enable_umsch_mm)
+		return 0;
+
+	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
+		return 0;
+
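+	/*
+	 * umsch_mm_gated is 1 while the block is gated, so a nonzero XOR
+	 * means the block is already in the requested state and the
+	 * request is a no-op.
+	 */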
+	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
+		return 0;
+
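+	/*
+	 * Hand the request to the ASIC-specific handler and update the
+	 * cached gating state only if the SMU accepted it.
+	 */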
+	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
+	if (!ret)
+		atomic_set(&power_gate->umsch_mm_gated, !enable);
+
+	return ret;
+}
+
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
 	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
 
 	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
 	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
 		smu_dpm_set_vcn_enable(smu, true);
 		smu_dpm_set_jpeg_enable(smu, true);
 		smu_dpm_set_vpe_enable(smu, true);
+		smu_dpm_set_umsch_mm_enable(smu, true);
 		smu_set_gfx_cgpg(smu, true);
 	}
 	smu_dpm_set_vcn_enable(smu, false);
 	smu_dpm_set_jpeg_enable(smu, false);
 	smu_dpm_set_vpe_enable(smu, false);
+	smu_dpm_set_umsch_mm_enable(smu, false);
 
 	adev->vcn.cur_state = AMD_PG_STATE_GATE;
 	adev->jpeg.cur_state = AMD_PG_STATE_GATE;