Introduce the nvme_is_aen_req() helper, which checks whether a completion on the admin queue belongs to an async event request, and use it in the PCI, RDMA, TCP and loop transports. This improves code readability and reduces code duplication.
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
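---
As context for the check being factored out: a minimal, standalone sketch of
the helper's semantics. The NVME_AQ_BLK_MQ_DEPTH value of 31 is assumed here
(upstream derives it as NVME_AQ_DEPTH - NVME_AQ_AEN_COMMANDS); the harness is
illustrative only and not part of the patch.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed value; the kernel derives this from NVME_AQ_DEPTH. */
	#define NVME_AQ_BLK_MQ_DEPTH 31

	/* Mirrors the new helper: a completion is an AEN if it arrives on
	 * the admin queue (qid 0) with a command ID beyond the blk-mq tag
	 * space, since no struct request is allocated for AEN commands. */
	static bool nvme_is_aen_req(uint16_t qid, uint16_t command_id)
	{
		return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
	}

	int main(void)
	{
		printf("%d\n", nvme_is_aen_req(0, 31)); /* 1: AEN completion */
		printf("%d\n", nvme_is_aen_req(0, 5));  /* 0: tagged admin cmd */
		printf("%d\n", nvme_is_aen_req(1, 40)); /* 0: I/O queue */
		return 0;
	}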
        put_device(ctrl->device);
 }
 
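+/*
+ * AEN commands on the admin queue (qid 0) are given command IDs at or
+ * above NVME_AQ_BLK_MQ_DEPTH, outside the blk-mq tag space, so they can
+ * be identified without a struct request lookup.
+ */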
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+       return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvmeq->qid == 0 &&
-                       cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+       if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
                nvme_complete_async_event(&nvmeq->dev->ctrl,
                                cqe->status, &cqe->result);
                return;
 
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
-                       cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+       if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+                                    cqe->command_id)))
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        else
 
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
-           cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+       if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+                                    cqe->command_id)))
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        else
 
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
-                       cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+       if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+                                    cqe->command_id))) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {