#include "smu_v11_0.h"
 #include "atom.h"
 
+int smu_feature_init_dpm(struct smu_context *smu)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       int ret = 0;
+       uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+
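+       /* Allow all features, then clear the ASIC-specific unallowed bits. */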
+       bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+
+       ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+                                            SMU_FEATURE_MAX/32);
+       if (ret)
+               return ret;
+
+       bitmap_andnot(feature->allowed, feature->allowed,
+                     (unsigned long *)unallowed_feature_mask,
+                     feature->feature_num);
+
+       return ret;
+}
+
 static int smu_set_funcs(struct amdgpu_device *adev)
 {
        struct smu_context *smu = &adev->smu;
                return -EINVAL;
 
        smu->pool_size = adev->pm.smu_prv_buffer_size;
+       smu->smu_feature.feature_num = SMU_FEATURE_MAX;
+       bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
+       bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
+       bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
 
        ret = smu_init_microcode(smu);
        if (ret) {
        if (ret)
                return ret;
 
+       ret = smu_feature_set_allowed_mask(smu);
+       if (ret)
+               return ret;
+
        ret = smu_read_pptable_from_vbios(smu);
        if (ret)
                return ret;
        if (ret)
                return ret;
 
+       ret = smu_feature_enable_all(smu);
+       if (ret)
+               return ret;
+
        /*
         * Set min deep sleep dce fclk with bootup value from vbios via
         * SetMinDeepSleepDcefclk MSG.
 
        mutex_lock(&smu->mutex);
 
+       ret = smu_feature_init_dpm(smu);
+       if (ret)
+               goto failed;
+
        ret = smu_smc_table_hw_init(smu);
        if (ret)
                goto failed;
 
        uint32_t power_context_size;
 };
 
+#define SMU_FEATURE_MAX        (64)
+struct smu_feature
+{
+       uint32_t feature_num;
+       DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
+       DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
+       DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
+};
+
 struct smu_context
 {
        struct amdgpu_device            *adev;
        struct smu_table_context        smu_table;
        struct smu_dpm_context          smu_dpm;
        struct smu_power_context        smu_power;
+       struct smu_feature              smu_feature;
 };
 
 struct pptable_funcs {
        int (*append_powerplay_table)(struct smu_context *smu);
        int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
        int (*run_afll_btc)(struct smu_context *smu);
+       int (*get_unallowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
 };
 
 struct smu_funcs
        int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
        int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
        int (*init_display)(struct smu_context *smu);
+       int (*set_allowed_mask)(struct smu_context *smu);
+       int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+       int (*enable_all_mask)(struct smu_context *smu);
+       int (*disable_all_mask)(struct smu_context *smu);
 
 };
 
        ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
 #define smu_init_display(smu) \
        ((smu)->funcs->init_display ? (smu)->funcs->init_display((smu)) : 0)
-
+#define smu_feature_set_allowed_mask(smu) \
+       ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+       ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_feature_enable_all(smu) \
+       ((smu)->funcs->enable_all_mask? (smu)->funcs->enable_all_mask((smu)) : 0)
+#define smu_feature_disable_all(smu) \
+       ((smu)->funcs->disable_all_mask? (smu)->funcs->disable_all_mask((smu)) : 0)
 #define smu_store_powerplay_table(smu) \
        ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
 #define smu_check_powerplay_table(smu) \
        ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
 #define smu_run_afll_btc(smu) \
        ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
+#define smu_get_unallowed_feature_mask(smu, feature_mask, num) \
+       ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_unallowed_feature_mask? (smu)->ppt_funcs->get_unallowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
 
 extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                                   uint16_t *size, uint8_t *frev, uint8_t *crev,
 extern const struct amd_ip_funcs smu_ip_funcs;
 
 extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern int smu_feature_init_dpm(struct smu_context *smu);
 
 #endif
 
        return ret;
 }
 
+static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       int ret = 0;
+       uint32_t feature_mask[2];
+
+       if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
+               return -EINVAL;
+
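+       /* Pack the 64-bit allowed bitmap into two 32-bit halves for the SMC. */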
+       bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+
+       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+                                         feature_mask[1]);
+       if (ret)
+               return ret;
+
+       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+                                         feature_mask[0]);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+                                     uint32_t *feature_mask, uint32_t num)
+{
+       uint32_t feature_mask_high = 0, feature_mask_low = 0;
+       int ret = 0;
+
+       if (!feature_mask || num < 2)
+               return -EINVAL;
+
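+       /* The enabled mask is read back from the SMC as two 32-bit halves. */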
+       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+       if (ret)
+               return ret;
+       ret = smu_read_smc_arg(smu, &feature_mask_high);
+       if (ret)
+               return ret;
+
+       ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+       if (ret)
+               return ret;
+       ret = smu_read_smc_arg(smu, &feature_mask_low);
+       if (ret)
+               return ret;
+
+       feature_mask[0] = feature_mask_low;
+       feature_mask[1] = feature_mask_high;
+
+       return ret;
+}
+
+static int smu_v11_0_enable_all_mask(struct smu_context *smu)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_mask[2];
+       int ret = 0;
+
+       ret = smu_send_smc_msg(smu, SMU_MSG_EnableAllSmuFeatures);
+       if (ret)
+               return ret;
+       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       if (ret)
+               return ret;
+
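+       /* Mirror the firmware-reported mask into enabled/supported. */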
+       bitmap_copy(feature->enabled, (unsigned long *)feature_mask,
+                   feature->feature_num);
+       bitmap_copy(feature->supported, (unsigned long *)feature_mask,
+                   feature->feature_num);
+
+       return ret;
+}
+
+static int smu_v11_0_disable_all_mask(struct smu_context *smu)
+{
+       struct smu_feature *feature = &smu->smu_feature;
+       uint32_t feature_mask[2];
+       int ret = 0;
+
+       ret = smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+       if (ret)
+               return ret;
+       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       if (ret)
+               return ret;
+
+       bitmap_copy(feature->enabled, (unsigned long *)feature_mask,
+                   feature->feature_num);
+       bitmap_copy(feature->supported, (unsigned long *)feature_mask,
+                   feature->feature_num);
+
+       return ret;
+}
+
 static const struct smu_funcs smu_v11_0_funcs = {
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
        .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
        .set_tool_table_location = smu_v11_0_set_tool_table_location,
        .init_display = smu_v11_0_init_display,
+       .set_allowed_mask = smu_v11_0_set_allowed_mask,
+       .get_enabled_mask = smu_v11_0_get_enabled_mask,
+       .enable_all_mask = smu_v11_0_enable_all_mask,
+       .disable_all_mask = smu_v11_0_disable_all_mask,
 };
 
 void smu_v11_0_set_smu_funcs(struct smu_context *smu)
 
        return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
 }
 
+static int
+vega20_get_unallowed_feature_mask(struct smu_context *smu,
+                                 uint32_t *feature_mask, uint32_t num)
+{
+       if (num < 2)
+               return -EINVAL;
+
+       feature_mask[0] = 0xE0041C00;
+       feature_mask[1] = 0xFFFFFFFE; /* bits 32~63 are unsupported */
+
+       return 0;
+}
+
 static const struct pptable_funcs vega20_ppt_funcs = {
        .alloc_dpm_context = vega20_allocate_dpm_context,
        .store_powerplay_table = vega20_store_powerplay_table,
        .append_powerplay_table = vega20_append_powerplay_table,
        .get_smu_msg_index = vega20_get_smu_msg_index,
        .run_afll_btc = vega20_run_btc_afll,
+       .get_unallowed_feature_mask = vega20_get_unallowed_feature_mask,
 };
 
 void vega20_set_ppt_funcs(struct smu_context *smu)