if (r)
                return r;
 
+       /* UVD ENC TRAP */
+       if (uvd_v6_0_enc_support(adev)) {
+               for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+                       /* ENC ring i raises src_id 119 + i; these must match
+                        * the 119/120 cases in the interrupt handler.
+                        * NOTE(review): 119 is a magic number — presumably
+                        * there is a named SRCID constant for it; confirm and
+                        * use it if so.
+                        */
+                       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
+                       if (r)
+                               return r;
+               }
+       }
+
         r = amdgpu_uvd_sw_init(adev);
         if (r)
                 return r;
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
 {
+       bool int_handled = true;
        DRM_DEBUG("IH: UVD TRAP\n");
-       amdgpu_fence_process(&adev->uvd.ring);
+
+       switch (entry->src_id) {
+       case 124:
+               amdgpu_fence_process(&adev->uvd.ring);
+               break;
+       case 119:
+               if (likely(uvd_v6_0_enc_support(adev)))
+                       amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+               else
+                       int_handled = false;
+               break;
+       case 120:
+               if (likely(uvd_v6_0_enc_support(adev)))
+                       amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+               else
+                       int_handled = false;
+               break;
+       }
+
+       if (false == int_handled)
+                       DRM_ERROR("Unhandled interrupt: %d %d\n",
+                         entry->src_id, entry->src_data[0]);
+
        return 0;
 }
 
 
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.irq.num_types = 1;
+       if (uvd_v6_0_enc_support(adev))
+               adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+       else
+               adev->uvd.irq.num_types = 1;
+
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
 }