        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
        mutex_init(&adev->vcn.vcn_pg_lock);
        atomic_set(&adev->vcn.total_submission_cnt, 0);
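+       /* per-instance count of in-flight encode submissions, used below to gate DPG unpause */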
+       for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+               atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 
        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                        struct dpg_pause_state new_state;
 
-                       if (fence[j])
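+                       /* an outstanding encode submission also keeps DPG paused */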
+                       if (fence[j] ||
+                               unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                struct dpg_pause_state new_state;
-               unsigned int fences = 0;
-               unsigned int i;
 
-               for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-                       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
-               }
-               if (fences)
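+               /* count the encode submission so the idle worker keeps DPG paused */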
+               if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+                       atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
-               else
-                       new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+               } else {
+                       unsigned int fences = 0;
+                       unsigned int i;
 
-               if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
-                       new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                       for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+                               fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+                       if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+                               new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                       else
+                               new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+               }
 
                adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
        }
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
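+       /* drop the encode submission count taken in amdgpu_vcn_ring_begin_use */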
+       if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+               ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+               atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
        atomic_dec(&ring->adev->vcn.total_submission_cnt);
 
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);