*
  */
 
-#include "amdgpu.h"
 #include "amdgpu_ras.h"
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_ih_info)
        adev->gmc.umc_ras_if = NULL;
        return r;
 }
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+               void *ras_error_status,
+               struct amdgpu_iv_entry *entry)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
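+       /* When "Full RAS" is enabled, the per-IP interrupt sources are
+        * disabled and errors are reported through the aggregated sync
+        * flood interrupt instead, so skip the per-block handling here.
+        */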
+       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+               return AMDGPU_RAS_SUCCESS;
+
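+       /* notify the KFD that an SRAM ECC error has been detected */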
+       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+       if (adev->umc.funcs &&
+           adev->umc.funcs->query_ras_error_count)
+               adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+       if (adev->umc.funcs &&
+           adev->umc.funcs->query_ras_error_address &&
+           adev->umc.max_ras_err_cnt_per_query) {
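+               /* one record per error that a single query can return */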
+               err_data->err_addr =
+                       kcalloc(adev->umc.max_ras_err_cnt_per_query,
+                               sizeof(struct eeprom_table_record), GFP_KERNEL);
+               /* still call query_ras_error_address to clear the error
+                * status even if a NOMEM error is encountered
+                */
+               if (!err_data->err_addr)
+                       DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+               /* umc query_ras_error_address is also responsible for clearing
+                * error status
+                */
+               adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+       }
+
+       /* only uncorrectable errors need a gpu reset */
+       if (err_data->ue_count) {
+               if (err_data->err_addr_cnt &&
+                   amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+                                               err_data->err_addr_cnt))
+                       DRM_WARN("Failed to add ras bad page!\n");
+
+               amdgpu_ras_reset_gpu(adev, 0);
+       }
+
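+       /* kfree() tolerates the NULL err_addr left by a failed allocation */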
+       kfree(err_data->err_addr);
+       return AMDGPU_RAS_SUCCESS;
+}
+
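+/* ECC interrupt handler: forward the IV entry to the generic RAS
+ * interrupt dispatcher, which will invoke the registered UMC callback.
+ */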
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+               struct amdgpu_irq_src *source,
+               struct amdgpu_iv_entry *entry)
+{
+       struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
+       struct ras_dispatch_if ih_data = {
+               .entry = entry,
+       };
+
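+       /* RAS was not enabled for the UMC block on this device */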
+       if (!ras_if)
+               return 0;
+
+       ih_data.head = *ras_if;
+
+       amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+       return 0;
+}
 
        return 0;
 }
 
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
-               void *ras_error_status,
-               struct amdgpu_iv_entry *entry)
-{
-       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
-       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-               return AMDGPU_RAS_SUCCESS;
-
-       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_count)
-           adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
-
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_address &&
-           adev->umc.max_ras_err_cnt_per_query) {
-               err_data->err_addr =
-                       kcalloc(adev->umc.max_ras_err_cnt_per_query,
-                               sizeof(struct eeprom_table_record), GFP_KERNEL);
-               /* still call query_ras_error_address to clear error status
-                * even NOMEM error is encountered
-                */
-               if(!err_data->err_addr)
-                       DRM_WARN("Failed to alloc memory for umc error address record!\n");
-
-               /* umc query_ras_error_address is also responsible for clearing
-                * error status
-                */
-               adev->umc.funcs->query_ras_error_address(adev, err_data);
-       }
-
-       /* only uncorrectable error needs gpu reset */
-       if (err_data->ue_count) {
-               if (err_data->err_addr_cnt &&
-                   amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
-                                               err_data->err_addr_cnt))
-                       DRM_WARN("Failed to add ras bad page!\n");
-
-               amdgpu_ras_reset_gpu(adev, 0);
-       }
-
-       kfree(err_data->err_addr);
-       return AMDGPU_RAS_SUCCESS;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
-               struct amdgpu_irq_src *source,
-               struct amdgpu_iv_entry *entry)
-{
-       struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
-       struct ras_dispatch_if ih_data = {
-               .entry = entry,
-       };
-
-       if (!ras_if)
-               return 0;
-
-       ih_data.head = *ras_if;
-
-       amdgpu_ras_interrupt_dispatch(adev, &ih_data);
-       return 0;
-}
-
 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
 
 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
-       .process = gmc_v9_0_process_ecc_irq,
+       .process = amdgpu_umc_process_ecc_irq,
 };
 
 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct ras_ih_if umc_ih_info = {
-               .cb = gmc_v9_0_process_ras_data_cb,
+               .cb = amdgpu_umc_process_ras_data_cb,
        };
 
        if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {