}
 }
 
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset' if one exists.
+ */
+static void
+hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+again:
+       spin_lock(&hisi_acc_vdev->reset_lock);
+       if (hisi_acc_vdev->deferred_reset) {
+               hisi_acc_vdev->deferred_reset = false;
+               spin_unlock(&hisi_acc_vdev->reset_lock);
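+               /* The VF has been reset; return to the initial migration state */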
+               hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+               hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+               hisi_acc_vf_disable_fds(hisi_acc_vdev);
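+               /*
+                * reset_lock was dropped during the cleanup above, so a
+                * concurrent reset_done may have set deferred_reset again;
+                * recheck before releasing the state_mutex.
+                */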
+               goto again;
+       }
+       mutex_unlock(&hisi_acc_vdev->state_mutex);
+       spin_unlock(&hisi_acc_vdev->reset_lock);
+}
+
 static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
 {
        struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
                        break;
                }
        }
-       mutex_unlock(&hisi_acc_vdev->state_mutex);
+       hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
        return res;
 }
 
 
        mutex_lock(&hisi_acc_vdev->state_mutex);
        *curr_state = hisi_acc_vdev->mig_state;
-       mutex_unlock(&hisi_acc_vdev->state_mutex);
+       hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
        return 0;
 }
 
+static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+       struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev);
+
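+       /* Reset handling is only needed when STOP_COPY migration is supported */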
+       if (hisi_acc_vdev->core_device.vdev.migration_flags !=
+                               VFIO_MIGRATION_STOP_COPY)
+               return;
+
+       /*
+        * As the higher VFIO layers are holding locks across reset and using
+        * those same locks with the mm_lock, we need to prevent an ABBA
+        * deadlock between the state_mutex and mm_lock.
+        * In case the state_mutex was already taken, we defer the cleanup
+        * work to the unlock flow of the other running context.
+        */
+       spin_lock(&hisi_acc_vdev->reset_lock);
+       hisi_acc_vdev->deferred_reset = true;
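+       /*
+        * If state_mutex is already held, its holder will observe
+        * deferred_reset and perform the reset when it releases the
+        * mutex via hisi_acc_vf_state_mutex_unlock().
+        */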
+       if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
+               spin_unlock(&hisi_acc_vdev->reset_lock);
+               return;
+       }
+       spin_unlock(&hisi_acc_vdev->reset_lock);
+       hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+}
+
 static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
 {
        struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
 
 MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
 
+static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+       .reset_done = hisi_acc_vf_pci_aer_reset_done,
+       .error_detected = vfio_pci_core_aer_err_detected,
+};
+
 static struct pci_driver hisi_acc_vfio_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = hisi_acc_vfio_pci_table,
        .probe = hisi_acc_vfio_pci_probe,
        .remove = hisi_acc_vfio_pci_remove,
-       .err_handler = &vfio_pci_core_err_handlers,
+       .err_handler = &hisi_acc_vf_err_handlers,
 };
 
 module_pci_driver(hisi_acc_vfio_pci_driver);