        nvme_start_queues(ctrl);
        /* read FW slot information to clear the AER */
        nvme_get_fw_slot_info(ctrl);
+
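+       /*
+        * Re-arm the AER now that the new FW slot information has been
+        * read; nvme_handle_aen_notice() skipped the immediate requeue
+        * for this event.
+        */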
+       queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
 static u32 nvme_aer_type(u32 result)
 {
        return result & 0x7;
 }
 
 static u32 nvme_aer_subtype(u32 result)
 {
        return (result & 0xff00) >> 8;
 }
 
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
        u32 aer_notice_type = nvme_aer_subtype(result);
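+       /* Re-arm the AER by default; FW activation defers it instead. */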
+       bool requeue = true;
 
        trace_nvme_async_event(ctrl, aer_notice_type);
 
        switch (aer_notice_type) {
        case NVME_AER_NOTICE_FW_ACT_STARTING:
                /*
                 * We are (ab)using the RESETTING state to prevent subsequent
                 * recovery actions from interfering with the controller's
                 * firmware activation.
                 */
                if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
                        nvme_auth_stop(ctrl);
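+                       /*
+                        * Don't re-arm the AER here; nvme_fw_act_work()
+                        * queues async_event_work again once firmware
+                        * activation has completed.
+                        */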
+                       requeue = false;
                        queue_work(nvme_wq, &ctrl->fw_act_work);
                }
                break;
        default:
                dev_warn(ctrl->device, "async event result %08x\n", result);
        }
+       return requeue;
 }
 
 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
 {
        dev_warn(ctrl->device, "resetting controller due to AER\n");
        nvme_reset_ctrl(ctrl);
 }
 
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                volatile union nvme_result *res)
 {
        u32 result = le32_to_cpu(res->u32);
        u32 aer_type = nvme_aer_type(result);
        u32 aer_subtype = nvme_aer_subtype(result);
+       bool requeue = true;
 
        if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
                return;
 
        switch (aer_type) {
        case NVME_AER_NOTICE:
-               nvme_handle_aen_notice(ctrl, result);
+               requeue = nvme_handle_aen_notice(ctrl, result);
                break;
        case NVME_AER_ERROR:
                /*
                 * For a persistent internal error, don't run async_event_work
                 * to submit a new AER. The controller reset will do it.
                 */
                if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
                        nvme_handle_aer_persistent_error(ctrl);
                        return;
                }
                fallthrough;
        case NVME_AER_SMART:
        case NVME_AER_CSS:
        case NVME_AER_VS:
                ctrl->aen_result = result;
                break;
        default:
                break;
        }
-       queue_work(nvme_wq, &ctrl->async_event_work);
+
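+       /* FW activation notices defer this requeue to nvme_fw_act_work(). */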
+       if (requeue)
+               queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);