atomic_set(&aca->ue_update_flag, 0);
 }
 
+/*
+ * Reset ACA software state across a GPU reset by clearing
+ * ue_update_flag.  Always returns 0; the int return is kept so the
+ * caller can share one error path with amdgpu_mca_reset().
+ */
+int amdgpu_aca_reset(struct amdgpu_device *adev)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+
+       atomic_set(&aca->ue_update_flag, 0);
+
+       return 0;
+}
+
 void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
 {
        struct amdgpu_aca *aca = &adev->aca;
 void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
 {
 #if defined(CONFIG_DEBUG_FS)
-       if (!root ||
-           (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6) &&
-            adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 14)))
+       if (!root)
                return;
 
        debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
 
        return 0;
 }
 
-static int amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
+/* Unlink @node from @mca_set, free it, and drop the entry count.
+ * Converted to void: the only failure mode was node == NULL, which is
+ * now silently ignored, so callers have nothing left to check.
+ */
+static void amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
 {
        if (!node)
-               return -EINVAL;
+               return;
 
        list_del(&node->node);
        kvfree(node);
 
        mca_set->nr_entries--;
-
-       return 0;
 }
 
 static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
 {
        struct mca_bank_node *node, *tmp;
 
-       list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
-               list_del(&node->node);
-               kvfree(node);
-       }
+       if (list_empty(&mca_set->list))
+               return;
+
+       list_for_each_entry_safe(node, tmp, &mca_set->list, node)
+               amdgpu_mca_bank_set_remove_node(mca_set, node);
 }
 
 void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
 void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
 {
 #if defined(CONFIG_DEBUG_FS)
-       if (!root ||
-           (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6) &&
-            amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 14)))
+       if (!root)
                return;
 
        debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
 
                            obj, &amdgpu_ras_debugfs_ops);
 }
 
+/*
+ * Report whether this ASIC supports the ACA/MCA RAS interface
+ * (MP0 IP v13.0.6 or v13.0.14).
+ *
+ * NOTE(review): the per-file debugfs checks this helper replaces
+ * keyed off MP1_HWIP; confirm MP0_HWIP is the intended block here.
+ */
+static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
+{
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+       case IP_VERSION(13, 0, 6):
+       case IP_VERSION(13, 0, 14):
+               return true;
+       default:
+               return false;
+       }
+}
+
 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
                }
        }
 
-       if (amdgpu_aca_is_enabled(adev))
-               amdgpu_aca_smu_debugfs_init(adev, dir);
-       else
-               amdgpu_mca_smu_debugfs_init(adev, dir);
+       if (amdgpu_ras_aca_is_supported(adev)) {
+               if (amdgpu_aca_is_enabled(adev))
+                       amdgpu_aca_smu_debugfs_init(adev, dir);
+               else
+                       amdgpu_mca_smu_debugfs_init(adev, dir);
+       }
 }
 
 /* debugfs end */
                goto release_con;
        }
 
+       if (amdgpu_ras_aca_is_supported(adev)) {
+               if (amdgpu_aca_is_enabled(adev))
+                       r = amdgpu_aca_init(adev);
+               else
+                       r = amdgpu_mca_init(adev);
+               if (r)
+                       goto release_con;
+       }
+
        dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
                 "hardware ability[%x] ras_mask[%x]\n",
                 adev->ras_hw_enabled, adev->ras_enabled);
 
        amdgpu_ras_event_mgr_init(adev);
 
-       if (amdgpu_aca_is_enabled(adev)) {
-               if (!amdgpu_in_reset(adev)) {
-                       r = amdgpu_aca_init(adev);
+       if (amdgpu_ras_aca_is_supported(adev)) {
+               if (amdgpu_in_reset(adev)) {
+                       if (amdgpu_aca_is_enabled(adev))
+                               r = amdgpu_aca_reset(adev);
+                       else
+                               r = amdgpu_mca_reset(adev);
                        if (r)
                                return r;
                }
 
-               if (!amdgpu_sriov_vf(adev))
-                       amdgpu_ras_set_aca_debug_mode(adev, false);
-       } else {
-               if (amdgpu_in_reset(adev))
-                       r = amdgpu_mca_reset(adev);
-               else
-                       r = amdgpu_mca_init(adev);
-               if (r)
-                       return r;
-
-               if (!amdgpu_sriov_vf(adev))
-                       amdgpu_ras_set_mca_debug_mode(adev, false);
+               if (!amdgpu_sriov_vf(adev)) {
+                       if (amdgpu_aca_is_enabled(adev))
+                               amdgpu_ras_set_aca_debug_mode(adev, false);
+                       else
+                               amdgpu_ras_set_mca_debug_mode(adev, false);
+               }
        }
 
        /* Guest side doesn't need init ras feature */
        amdgpu_ras_fs_fini(adev);
        amdgpu_ras_interrupt_remove_all(adev);
 
-       if (amdgpu_aca_is_enabled(adev))
-               amdgpu_aca_fini(adev);
-       else
-               amdgpu_mca_fini(adev);
+       if (amdgpu_ras_aca_is_supported(adev)) {
+               if (amdgpu_aca_is_enabled(adev))
+                       amdgpu_aca_fini(adev);
+               else
+                       amdgpu_mca_fini(adev);
+       }
 
        WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");