for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                        fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 
-               if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
+               /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+               if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+                   !adev->vcn.using_unified_queue) {
                        struct dpg_pause_state new_state;
 
                        if (fence[j] ||
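The first hunk is cut off at the fence check; the substantive change is the new !adev->vcn.using_unified_queue gate. That flag is set once during software init. A minimal sketch of how it is likely derived, assuming the companion sw_init change keys off the VCN (UVD_HWIP) IP version:

	/* VCN 4.x and newer expose a single unified queue; firmware owns
	 * DPG pause there, so the driver skips manual pause handling.
	 * (Sketch under the assumption above, not part of this hunk.)
	 */
	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
		adev->vcn.using_unified_queue = true;

The same gate is then applied on the submission path in amdgpu_vcn_ring_begin_use():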
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                               AMD_PG_STATE_UNGATE);
 
-       if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
+       /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+       if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+           !adev->vcn.using_unified_queue) {
                struct dpg_pause_state new_state;
 
                if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
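The branch opened above is truncated here; on pre-VCN4 parts it derives the new pause state from the per-instance encode submission count. A sketch of that logic, assuming the upstream field and callback names (dpg_enc_submission_cnt, pause_dpg_mode) are unchanged:

	/* Encode work forces a DPG pause; other rings pause only while
	 * encode submissions remain in flight on this instance.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
		atomic_inc(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
		new_state.fw_based = VCN_DPG_STATE__PAUSE;
	} else {
		new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		if (atomic_read(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
	}
	adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);

The last hunk applies the same unified-queue check in amdgpu_vcn_ring_end_use():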
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
+       /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
        if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
-               ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+           ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+           !adev->vcn.using_unified_queue)
                atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 
        atomic_dec(&ring->adev->vcn.total_submission_cnt);
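
After the counters are dropped, the function re-arms the idle worker from the first hunk. A sketch of the expected tail, assuming the usual VCN_IDLE_TIMEOUT delay:

	/* Kick the delayed idle work so the handler above can power-gate
	 * the block again once all submissions have drained.
	 */
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

With unified queues, dpg_enc_submission_cnt is neither incremented in begin_use nor decremented here, so the counter stays balanced while firmware manages DPG on VCN4 and above.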