if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
                kv_dpm_powergate_vce(adev, false);
-               /* turn the clocks on when encoding */
-               ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                                   AMD_CG_STATE_UNGATE);
-               if (ret)
-                       return ret;
                if (pi->caps_stable_p_state)
                        pi->vce_boot_level = table->count - 1;
                else
                        amdgpu_kv_send_msg_to_smc_with_parameter(adev,
                                                          PPSMC_MSG_VCEDPM_SetEnabledMask,
                                                          (1 << pi->vce_boot_level));
-
                kv_enable_vce_dpm(adev, true);
        } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
                kv_enable_vce_dpm(adev, false);
-               /* turn the clocks off when not encoding */
-               ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                                   AMD_CG_STATE_GATE);
-               if (ret)
-                       return ret;
                kv_dpm_powergate_vce(adev, true);
        }
 
 static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 {
        struct kv_power_info *pi = kv_get_pi(adev);
-       int ret;
 
        if (pi->vce_power_gated == gate)
                return;
 
        pi->vce_power_gated = gate;
 
-       if (gate) {
-               if (pi->caps_vce_pg) {
-                       /* shutdown the VCE block */
-                       ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                                           AMD_PG_STATE_GATE);
-                       /* XXX: check for errors */
-                       /* power off the VCE block */
-                       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-               }
-       } else {
-               if (pi->caps_vce_pg) {
-                       /* power on the VCE block */
-                       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
-                       /* re-init the VCE block */
-                       ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                                           AMD_PG_STATE_UNGATE);
-                       /* XXX: check for errors */
-               }
-       }
+       if (!pi->caps_vce_pg)
+               return;
+
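+       /* power the VCE block off/on directly via the SMU */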
+       if (gate)
+               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+       else
+               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
 }
 
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
 
        kv_dpm_powergate_acp(adev, true);
        kv_dpm_powergate_samu(adev, true);
-       kv_dpm_powergate_vce(adev, true);
 
        return 0;
 }
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
 static int vce_v2_0_wait_for_idle(void *handle);
+static void vce_v2_0_init_cg(struct amdgpu_device *adev);
+static void vce_v2_0_disable_cg(struct amdgpu_device *adev);
+static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
+                                                               bool sw_cg);
+
 /**
  * vce_v2_0_ring_get_rptr - get read pointer
  *
        struct amdgpu_ring *ring;
        int r;
 
-       vce_v2_0_mc_resume(adev);
-
        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);
 
+       vce_v2_0_init_cg(adev);
+       vce_v2_0_disable_cg(adev);
+
+       vce_v2_0_mc_resume(adev);
+
        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, ring->wptr);
        WREG32(mmVCE_RB_WPTR, ring->wptr);
        return 0;
 }
 
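+/*
+ * Stop the VCE block: wait for the engine to go idle, stall the memory
+ * interface and then hold the VCPU in reset.
+ */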
+static int vce_v2_0_stop(struct amdgpu_device *adev)
+{
+       int i, j;
+       int status;
+
+       if (vce_v2_0_lmi_clean(adev)) {
+               DRM_INFO("VCE is not idle\n");
+               return 0;
+       }
+       /*
+        * Alternative, currently disabled: poll VCE_FW_REG_STATUS until bit 0
+        * (assumed to be the firmware busy flag) clears.
+        *
+       for (i = 0; i < 10; ++i) {
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(mmVCE_FW_REG_STATUS);
+                       if (!(status & 1))
+                               break;
+                       mdelay(1);
+               }
+               break;
+       }
+        */
+       if (vce_v2_0_wait_for_idle(adev)) {
+               DRM_INFO("VCE is busy, can't set clock gating\n");
+               return 0;
+       }
+
+       /* Stall UMC and register bus before resetting VCPU */
+       WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
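+       /* wait for the LMI status bits (assumed to signal an idle interface) */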
+       for (i = 0; i < 10; ++i) {
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(mmVCE_LMI_STATUS);
+                       if (status & 0x240)
+                               break;
+                       mdelay(1);
+               }
+               break;
+       }
+
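+       /* clear the VCPU clock enable and related control bits */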
+       WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
+
+       /* put LMI, VCPU, RBC etc... into reset */
+       WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
+
+       WREG32(mmVCE_STATUS, 0);
+
+       return 0;
+}
+
 static int vce_v2_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = vce_v2_0_start(adev);
-       /* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
-       if (r)
-               return 0;
-
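+       /* raise the VCE clocks and enable MGCG here; the block itself is now
+        * started/stopped through the powergating callback
+        */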
+       amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+       vce_v2_0_enable_mgcg(adev, true, false);
        for (i = 0; i < adev->vce.num_rings; i++)
                adev->vce.ring[i].ready = false;
 
 
 static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
 {
-       if (vce_v2_0_wait_for_idle(adev)) {
-               DRM_INFO("VCE is busy, Can't set clock gateing");
-               return;
-       }
-
-       WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+       u32 orig, tmp;
 
-       if (vce_v2_0_lmi_clean(adev)) {
-               DRM_INFO("LMI is busy, Can't set clock gateing");
-               return;
-       }
-
-       WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-       WREG32_P(mmVCE_SOFT_RESET,
-                VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-       WREG32(mmVCE_STATUS, 0);
+       /* LMI_MC/LMI_UMC always set in dynamic,
+        * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
+        */
+       tmp = RREG32(mmVCE_CLOCK_GATING_B);
+       tmp &= ~0x00060006;
 
-       if (gated)
-               WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-       /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
+       /* ECPU, IH, SEM and SYS blocks are exceptions that must be turned on/off by SW */
        if (gated) {
-               /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
-               WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
+               tmp |= 0xe10000;
+               WREG32(mmVCE_CLOCK_GATING_B, tmp);
        } else {
-               /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
-               WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
+               tmp |= 0xe1;
+               tmp &= ~0xe10000;
+               WREG32(mmVCE_CLOCK_GATING_B, tmp);
        }
 
-       /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
-       WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
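+       /* keep VCE_UENC_CLOCK_GATING in dynamic mode: {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */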
+       orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+       tmp &= ~0x1fe000;
+       tmp &= ~0xff000000;
+       if (tmp != orig)
+               WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
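+       /* likewise keep VCE_UENC_REG_CLOCK_GATING in dynamic mode */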
+       orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+       tmp &= ~0x3fc;
+       if (tmp != orig)
+               WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
 
        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
 
-       WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
-       if(!gated) {
-               WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-               mdelay(100);
-               WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-               vce_v2_0_firmware_loaded(adev);
-               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-       }
+       if (gated)
+               WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
 }
 
 static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
 }
 
-static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
+                                                               bool sw_cg)
 {
-       bool sw_cg = false;
-
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
 
        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
-
-       vce_v2_0_init_cg(adev);
 }
 
 static bool vce_v2_0_is_idle(void *handle)
        return 0;
 }
 
-static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
-       u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
-       if (enable)
-               tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-       else
-               tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
-       WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
 static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
        bool gate = false;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-
+       bool sw_cg = false;
 
-       vce_v2_0_set_bypass_mode(adev, enable);
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (state == AMD_CG_STATE_GATE)
+       if (state == AMD_CG_STATE_GATE) {
                gate = true;
+               sw_cg = true;
+       }
 
-       vce_v2_0_enable_mgcg(adev, gate);
+       vce_v2_0_enable_mgcg(adev, gate, sw_cg);
 
        return 0;
 }
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-               return 0;
-
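+       /* VCE power gating is now implemented as a full stop/start of the block */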
        if (state == AMD_PG_STATE_GATE)
-               /* XXX do we need a vce_v2_0_stop()? */
-               return 0;
+               return vce_v2_0_stop(adev);
        else
                return vce_v2_0_start(adev);
 }