return 0;
 }
 
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int r, i;
+
+       r = amdgpu_ras_block_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       /* take a poison irq reference on each unharvested VCN instance */
+       if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+               for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+                       if (adev->vcn.harvest_config & (1 << i))
+                               continue;
+
+                       r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+                       if (r)
+                               goto late_fini;
+               }
+       }
+       return 0;
+
+late_fini:
+       amdgpu_ras_block_late_fini(adev, ras_block);
+       return r;
+}
+
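The references taken in amdgpu_vcn_ras_late_init() need a matching release on teardown. A minimal sketch of the counterpart in an IP version's hw_fini path, assuming a hypothetical vcn_vX_0_hw_fini; only ras_poison_irq and the amdgpu helpers used come from this patch or existing amdgpu code:

/* Illustrative teardown: drop the poison irq reference taken by
 * amdgpu_vcn_ras_late_init(). vcn_vX_0_hw_fini is a hypothetical name.
 */
static int vcn_vX_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;

                if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
                        amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
        }

        return 0;
}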
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 {
        int err;
        struct amdgpu_vcn_ras *ras;
 
        if (!adev->vcn.ras)
                return 0;
 
        ras = adev->vcn.ras;
        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register vcn ras block!\n");
                return err;
        }
 
        adev->vcn.ras_if = &ras->ras_block.ras_comm;
 
        if (!ras->ras_block.ras_late_init)
-               ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+               ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
 
        return 0;
 }
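With the fallback above, an IP version that leaves ras_late_init unset picks up amdgpu_vcn_ras_late_init automatically. A sketch of the minimal per-IP wiring under that assumption; the vcn_vX_0 names and the hw_ops table are hypothetical:

/* Illustrative: the IP version only supplies its ras object;
 * amdgpu_vcn_ras_sw_init() then installs amdgpu_vcn_ras_late_init.
 */
static struct amdgpu_vcn_ras vcn_vX_0_ras = {
        .ras_block = {
                .hw_ops = &vcn_vX_0_ras_hw_ops, /* hypothetical hw_ops table */
        },
};

static void vcn_vX_0_set_ras_funcs(struct amdgpu_device *adev)
{
        adev->vcn.ras = &vcn_vX_0_ras;
}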
 
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
        atomic_t                sched_score;
        struct amdgpu_irq_src   irq;
+       struct amdgpu_irq_src   ras_poison_irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
        struct dpg_pause_state  pause_state;
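The new per-instance ras_poison_irq is a separate amdgpu_irq_src from the instance's regular irq, so each IP version has to give it a funcs table and route the hardware poison source id to it during sw_init. A hedged sketch: the vcn_vX_0 names, the stub .set handler, and VCN_X_0__SRCID_UVD_POISON are placeholders, while amdgpu_irq_add_id and amdgpu_vcn_process_poison_irq (declared below) are existing amdgpu interfaces:

/* Illustrative stub: the poison interrupt is enabled by firmware, so there
 * is no driver-side interrupt state to program.
 */
static int vcn_vX_0_set_ras_interrupt_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *source,
                                            unsigned int type,
                                            enum amdgpu_interrupt_state state)
{
        return 0;
}

static const struct amdgpu_irq_src_funcs vcn_vX_0_ras_irq_funcs = {
        .set = vcn_vX_0_set_ras_interrupt_state,
        .process = amdgpu_vcn_process_poison_irq, /* shared poison handler */
};

/* Illustrative sw_init helper for instance i; the source id is a placeholder. */
static int vcn_vX_0_init_poison_irq(struct amdgpu_device *adev, int i)
{
        adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_vX_0_ras_irq_funcs;

        return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                                 VCN_X_0__SRCID_UVD_POISON,
                                 &adev->vcn.inst[i].ras_poison_irq);
}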
 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
                        struct amdgpu_irq_src *source,
                        struct amdgpu_iv_entry *entry);
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
+                       struct ras_common_if *ras_block);
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
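For context on when the new hook runs: the amdgpu RAS core walks its registered blocks during device late init and calls each block's ras_late_init, which is where amdgpu_vcn_ras_late_init() now gets invoked for VCN. A simplified sketch of that pre-existing dispatch, reconstructed from amdgpu_ras.c (not part of this patch; details may differ):

int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;
        int r;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                obj = node->ras_obj;
                if (obj && obj->ras_late_init) {
                        r = obj->ras_late_init(adev, &obj->ras_comm);
                        if (r)
                                return r;
                }
        }

        return 0;
}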