struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, async_event_work);
 
-       spin_lock_irq(&ctrl->lock);
-       while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
-               int aer_idx = --ctrl->event_limit;
-
-               spin_unlock_irq(&ctrl->lock);
-               ctrl->ops->submit_async_event(ctrl, aer_idx);
-               spin_lock_irq(&ctrl->lock);
-       }
-       spin_unlock_irq(&ctrl->lock);
+       ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)

 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                union nvme_result *res)
 {
        u32 result = le32_to_cpu(res->u32);
-       bool done = true;
 
-       switch (le16_to_cpu(status) >> 1) {
-       case NVME_SC_SUCCESS:
-               done = false;
-               /*FALLTHRU*/
-       case NVME_SC_ABORT_REQ:
-               ++ctrl->event_limit;
-               if (ctrl->state == NVME_CTRL_LIVE)
-                       queue_work(nvme_wq, &ctrl->async_event_work);
-               break;
-       default:
-               break;
-       }
-
-       if (done)
+       if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
                return;
 
        switch (result & 0xff07) {
        default:
                dev_warn(ctrl->device, "async event result %08x\n", result);
        }
+       queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
-       ctrl->event_limit = NVME_NR_AEN_COMMANDS;
        queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
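
Net effect on the core async-event path, pieced together from the '+' lines
above (the per-notice handling and some declarations are elided, so read this
as an illustrative sketch rather than a verbatim excerpt of core.c):

static void nvme_async_event_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, async_event_work);

        /* Only one AEN is ever outstanding, so no per-index loop is needed. */
        ctrl->ops->submit_async_event(ctrl);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                union nvme_result *res)
{
        u32 result = le32_to_cpu(res->u32);

        if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
                return;

        /* ... per-event handling of (result & 0xff07) elided ... */

        /* Re-arm the single AEN only after the previous one has completed. */
        queue_work(nvme_wq, &ctrl->async_event_work);
}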
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
 }
 
 static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
        struct nvme_fc_fcp_op *aen_op;
        unsigned long flags;
        bool terminating = false;
        blk_status_t ret;
 
-       if (aer_idx > NVME_NR_AEN_COMMANDS)
-               return;
-
        spin_lock_irqsave(&ctrl->lock, flags);
        if (ctrl->flags & FCCTRL_TERMIO)
                terminating = true;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        if (terminating)
                return;
 
-       aen_op = &ctrl->aen_ops[aer_idx];
+       aen_op = &ctrl->aen_ops[0];
 
        ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
                                        NVMEFC_FCP_NODATA);
        if (ret)
                dev_err(ctrl->ctrl.device,
-                       "failed async event work [%d]\n", aer_idx);
+                       "failed async event work\n");
 }
 
 static void
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
        u16 nssa;
        u16 nr_streams;
        atomic_t abort_limit;
-       u8 event_limit;
        u8 vwc;
        u32 vs;
        u32 sgls;
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
-       void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+       void (*submit_async_event)(struct nvme_ctrl *ctrl);
        void (*delete_ctrl)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
        int (*reinit_request)(void *data, struct request *rq);
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
        return __nvme_poll(nvmeq, tag);
 }
 
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
        struct nvme_dev *dev = to_nvme_dev(ctrl);
        struct nvme_queue *nvmeq = dev->queues[0];
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_admin_async_event;
-       c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
+       c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 
        spin_lock_irq(&nvmeq->q_lock);
        __nvme_submit_cmd(nvmeq, &c);
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
        return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
        struct nvme_rdma_queue *queue = &ctrl->queues[0];
        struct ib_device *dev = queue->device->dev;
        struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
        struct nvme_command *cmd = sqe->data;
        struct ib_sge sge;
        int ret;
 
-       if (WARN_ON_ONCE(aer_idx != 0))
-               return;
-
        ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
 
        memset(cmd, 0, sizeof(*cmd));
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
        return BLK_STS_OK;
 }
 
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 {
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];