        struct amdgpu_device *remote_adev = NULL;
        struct amdgpu_device *adev = ras->adev;
        struct list_head device_list, *device_list_handle =  NULL;
+       struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 
+       if (hive)
+               atomic_set(&hive->ras_recovery, 1);
        if (!ras->disable_ras_err_cnt_harvest) {
-               struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 
                /* Build list of devices to query RAS related errors */
                if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
                        device_list_handle = &hive->device_list;
                } else {
                        INIT_LIST_HEAD(&device_list);
                        list_add_tail(&adev->gmc.xgmi.head, &device_list);
                        device_list_handle = &device_list;
                }

                list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head)
                        amdgpu_ras_log_on_err_counter(remote_adev);

-               amdgpu_put_xgmi_hive(hive);
        }
 
        if (amdgpu_device_should_recover_gpu(ras->adev)) {
                amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
        }
        atomic_set(&ras->in_recovery, 0);
+       if (hive) {
+               atomic_set(&hive->ras_recovery, 0);
+               amdgpu_put_xgmi_hive(hive);
+       }
 }
 
 /* alloc/realloc bps array */
 
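For the atomic_set() and atomic_read() operations on hive->ras_recovery to build, struct amdgpu_hive_info has to carry a matching atomic_t member. The sketch below is only illustrative: the member list is abridged and the placement may differ from the upstream definition (the struct is declared in amdgpu_xgmi.h).

struct amdgpu_hive_info {
        struct kobject kobj;
        uint64_t hive_id;
        struct list_head device_list;   /* walked above to harvest error counters */
        /* ... remaining hive members elided in this sketch ... */
        atomic_t ras_recovery;          /* non-zero while any device in the hive is in RAS recovery */
};

The second hunk, from the SMU v13.0.6 power-management code, reads this flag back on the mode-1 reset path so that a fatal error on any device in the hive is reported to the PMFW.
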
 static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
+       struct amdgpu_hive_info *hive = NULL;
+       u32 hive_ras_recovery = 0;
        struct amdgpu_ras *ras;
        u32 fatal_err, param;
        int ret = 0;
 
+       hive = amdgpu_get_xgmi_hive(adev);
        ras = amdgpu_ras_get_context(adev);
        fatal_err = 0;
        param = SMU_RESET_MODE_1;
 
+       if (hive) {
+               hive_ras_recovery = atomic_read(&hive->ras_recovery);
+               amdgpu_put_xgmi_hive(hive);
+       }
+
        /* fatal error triggered by ras, PMFW supports the flag */
-       if (ras && atomic_read(&ras->in_recovery))
+       if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
                fatal_err = 1;
 
        param |= (fatal_err << 16);
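        /*
         * Sketch of the step that follows this hunk (not shown here): the
         * composed param word, with the RAS fatal-error flag in bit 16, is
         * handed to the PMFW as the argument of the driver reset message,
         * roughly:
         *
         *   ret = smu_cmn_send_smc_msg_with_param(smu,
         *                  SMU_MSG_GfxDeviceDriverReset, param, NULL);
         *
         * so the firmware knows this mode-1 reset was triggered by a fatal
         * RAS error rather than an ordinary driver-requested reset.
         */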