 void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
-                       adev->gfx.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->gfx.ras_if);
+                       ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
 
 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 {
        if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini)
-               adev->umc.ras->ras_block.ras_fini(adev, NULL);
+               adev->umc.ras->ras_block.ras_fini(adev, adev->umc.ras_if);
 
        if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini)
-               adev->mmhub.ras->ras_block.ras_fini(adev, NULL);
+               adev->mmhub.ras->ras_block.ras_fini(adev, adev->mmhub.ras_if);
 
        if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini)
-               adev->gmc.xgmi.ras->ras_block.ras_fini(adev, NULL);
+               adev->gmc.xgmi.ras->ras_block.ras_fini(adev, adev->gmc.xgmi.ras_if);
 
        if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_fini)
-               adev->hdp.ras->ras_block.ras_fini(adev, NULL);
+               adev->hdp.ras->ras_block.ras_fini(adev, adev->hdp.ras_if);
 }
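
For context, the calls in amdgpu_gmc_ras_fini() above go through the ras_fini member on each IP's RAS block object. A minimal sketch of the callback shape this diff implies, assuming the surrounding struct layout (member names are taken from the calls above, not copied verbatim from a header):

/* Sketch only: the ras_fini callback now receives the block's
 * ras_common_if from its caller instead of digging it out of
 * adev, which is the conversion every hunk in this patch makes. */
struct amdgpu_ras_block_object {
        /* ... other members elided ... */
        void (*ras_fini)(struct amdgpu_device *adev,
                         struct ras_common_if *ras_block);
};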
 
        /*
 
 void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) &&
-           adev->hdp.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->hdp.ras_if);
+           ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
 
 void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
-                       adev->mmhub.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->mmhub.ras_if);
+                       ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
 
 void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
-                       adev->nbio.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->nbio.ras_if);
+                       ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
 
 void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
-                       adev->sdma.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->sdma.ras_if);
+                       ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
 
 int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
 
 void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
-                       adev->umc.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->umc.ras_if);
+                       ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
 
 int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
 
 static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
-                       adev->gmc.xgmi.ras_if)
-               amdgpu_ras_block_late_fini(adev, adev->gmc.xgmi.ras_if);
+                       ras_block)
+               amdgpu_ras_block_late_fini(adev, ras_block);
 }
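
With this change, every per-IP wrapper above reduces to the same shape: check that RAS is supported for the block, check the caller-supplied ras_block, then call amdgpu_ras_block_late_fini(). A hedged sketch of that common pattern follows; the helper name is hypothetical and not part of this patch:

/* Hypothetical illustration, not in this patch: the shared shape of
 * amdgpu_{gfx,hdp,mmhub,nbio,sdma,umc,xgmi}_ras_fini once the
 * ras_common_if is passed in by the caller. */
static void amdgpu_ip_ras_fini_common(struct amdgpu_device *adev,
                                      enum amdgpu_ras_block block,
                                      struct ras_common_if *ras_block)
{
        if (amdgpu_ras_is_supported(adev, block) && ras_block)
                amdgpu_ras_block_late_fini(adev, ras_block);
}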
 
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
 
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini)
-               adev->gfx.ras->ras_block.ras_fini(adev, NULL);
+               adev->gfx.ras->ras_block.ras_fini(adev, adev->gfx.ras_if);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
        if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
                adev->sdma.ras->ras_block.ras_fini)
-               adev->sdma.ras->ras_block.ras_fini(adev, NULL);
+               adev->sdma.ras->ras_block.ras_fini(adev, adev->sdma.ras_if);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_fini)
-               adev->nbio.ras->ras_block.ras_fini(adev, NULL);
+               adev->nbio.ras->ras_block.ras_fini(adev, adev->nbio.ras_if);
 
        if (adev->df.funcs &&
            adev->df.funcs->sw_fini)