INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
+       INIT_LIST_HEAD(&adev->reset_list);
+
        INIT_DELAYED_WORK(&adev->delayed_init_work,
                          amdgpu_device_delayed_init_work_handler);
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
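
A note on the pattern: an intrusive list_head can sit on only one list at a time,
and gmc.xgmi.head already carries hive membership, so building a per-recovery
device list needs its own node. With reset_list, each recovery can assemble a
transient list without disturbing the hive's. A minimal userspace sketch of the
two-node pattern, with the list helpers re-implemented from <linux/list.h> so it
compiles standalone (the struct and field names are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct gpu {
            int id;
            struct list_head hive_node;  /* permanent hive membership */
            struct list_head reset_node; /* rebuilt for every recovery */
    };

    int main(void)
    {
            struct list_head hive, reset_list;
            struct gpu devs[3];
            struct list_head *p;
            int i;

            INIT_LIST_HEAD(&hive);
            INIT_LIST_HEAD(&reset_list);
            for (i = 0; i < 3; i++) {
                    devs[i].id = i;
                    list_add_tail(&devs[i].hive_node, &hive);
            }
            /* mirror the hive onto the reset list; the hive is untouched */
            for (p = hive.next; p != &hive; p = p->next)
                    list_add_tail(&container_of(p, struct gpu, hive_node)->reset_node,
                                  &reset_list);
            for (p = reset_list.next; p != &reset_list; p = p->next)
                    printf("reset gpu %d\n",
                           container_of(p, struct gpu, reset_node)->id);
            return 0;
    }
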
        int r = 0;
 
        /*
-        * ASIC reset has to be done on all HGMI hive nodes ASAP
+        * ASIC reset has to be done on all XGMI hive nodes ASAP
         * to allow proper link negotiation in FW (within 1 sec)
         */
        if (!skip_hw_reset && need_full_reset) {
-               list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+               list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                        /* For XGMI run all resets in parallel to speed up the process */
                        if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
                                if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
 
                /* For XGMI wait for all resets to complete before proceeding */
                if (!r) {
-                       list_for_each_entry(tmp_adev, device_list_handle,
-                                           gmc.xgmi.head) {
+                       list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                                if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
                                        flush_work(&tmp_adev->xgmi_reset_work);
                                        r = tmp_adev->asic_reset_res;
        }
 
        if (!r && amdgpu_ras_intr_triggered()) {
-               list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+               list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                        if (tmp_adev->mmhub.funcs &&
                            tmp_adev->mmhub.funcs->reset_ras_error_count)
                                tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
                amdgpu_ras_intr_cleared();
        }
 
-       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+       list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                if (need_full_reset) {
                        /* post card */
                        if (amdgpu_device_asic_init(tmp_adev))
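
The hunk above keeps the existing fan-out/join shape: every hive member's reset
is queued on system_unbound_wq before any flush_work() is issued, which is what
lets all ASICs reset inside the firmware's one-second link negotiation window.
Roughly the same shape, sketched with POSIX threads in place of the kernel
workqueue (the names and the trivial reset stub are hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    struct node { int id; int reset_res; pthread_t worker; };

    static void *do_reset(void *arg)
    {
            struct node *n = arg;
            n->reset_res = 0; /* stand-in for the per-ASIC reset */
            printf("node %d reset done\n", n->id);
            return NULL;
    }

    int main(void)
    {
            struct node nodes[4];
            int i, r = 0;

            /* fan out: start every reset before waiting on any of them */
            for (i = 0; i < 4; i++) {
                    nodes[i].id = i;
                    pthread_create(&nodes[i].worker, NULL, do_reset, &nodes[i]);
            }
            /* join: proceed only once every node has come out of reset */
            for (i = 0; i < 4; i++) {
                    pthread_join(nodes[i].worker, NULL);
                    if (nodes[i].reset_res)
                            r = nodes[i].reset_res;
            }
            return r;
    }
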
         */
        INIT_LIST_HEAD(&device_list);
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
-               if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
-                       list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
-               device_list_handle = &hive->device_list;
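+               /* clone the hive membership into a list private to this reset */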
+               list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+                       list_add_tail(&tmp_adev->reset_list, &device_list);
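+               /* rotate so the adev that triggered recovery is handled first */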
+               if (!list_is_first(&adev->reset_list, &device_list))
+                       list_rotate_to_front(&adev->reset_list, &device_list);
+               device_list_handle = &device_list;
        } else {
-               list_add_tail(&adev->gmc.xgmi.head, &device_list);
+               list_add_tail(&adev->reset_list, &device_list);
                device_list_handle = &device_list;
        }
 
        /* block all schedulers and reset given job's ring */
-       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+       list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                /*
                 * Try to put the audio codec into suspend state
                 * before the GPU reset starts.
        }
 
 retry: /* Run pre-ASIC-reset on the rest of the adevs in the XGMI hive. */
-       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+       list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 (tmp_adev == adev) ? job : NULL,
                                                 &need_full_reset);
 skip_hw_reset:
 
        /* Post ASIC reset for all devs. */
-       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+       list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
        }
 
 skip_sched_resume:
-       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+       list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                /* unlock kfd: SRIOV would do it separately */
                if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
                        amdgpu_amdkfd_post_reset(tmp_adev);
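
One subtlety in the rotate earlier in this hunk: list_rotate_to_front() never
relinks entries relative to each other; it only moves the list sentinel so the
chosen entry becomes the first one each subsequent loop visits. In
<linux/list.h> it is expressed as list_move_tail(head, list); expanded against
the minimal struct list_head from the first sketch, it amounts to:

    struct list_head { struct list_head *next, *prev; };

    /* after this call, walking from 'head' visits 'entry' first */
    static void list_rotate_to_front(struct list_head *entry,
                                     struct list_head *head)
    {
            /* unlink the sentinel from its current position... */
            head->prev->next = head->next;
            head->next->prev = head->prev;
            /* ...and splice it back in immediately before 'entry' */
            head->prev = entry->prev;
            head->next = entry;
            entry->prev->next = head;
            entry->prev = head;
    }
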
        DRM_INFO("PCI error: slot reset callback!!\n");
 
        INIT_LIST_HEAD(&device_list);
-       list_add_tail(&adev->gmc.xgmi.head, &device_list);
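+       /* slot reset recovers just this one device; list it alone */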
+       list_add_tail(&adev->reset_list, &device_list);
 
        /* wait for asic to come out of reset */
        msleep(500);