        struct blk_mq_tag_set   admin_tag_set;
        struct blk_mq_tag_set   tag_set;
 
+       struct work_struct      ioerr_work;
        struct delayed_work     connect_work;
 
        struct kref             ref;
 };
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+                       container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+       nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
 
 check_error:
        if (terminate_assoc)
-               nvme_fc_error_recovery(ctrl, "transport detected io error");
+               queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static void
 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side.  this will block
 
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+       INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
 
 fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);