        union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
 
        /* If the host has not yet processed the next entry then we are done */
-       if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+       if (((q->host_index + 1) % q->entry_count) == q->hba_index) {
+               q->WQ_overflow++;
                return -ENOMEM;
+       }
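+       /* A free slot exists: count the WQE about to be posted */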
+       q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2537 Receive Frame Truncated!!\n");
+               hrq->RQ_buf_trunc++;
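+               /* Fall through: a truncated frame is still handed up as received */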
        case FC_STATUS_RQ_SUCCESS:
                lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
                dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
                if (!dma_buf) {
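+                       /* No free HBQ buffer to hold the received frame */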
+                       hrq->RQ_no_buf_found++;
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        goto out;
                }
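+               /* Buffer found: count the frame as received */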
+               hrq->RQ_rcv_buf++;
                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
                /* save off the frame for the worker thread to process */
                list_add_tail(&dma_buf->cq_event.list,
                              &phba->sli4_hba.sp_queue_event);
                break;
        case FC_STATUS_INSUFF_BUF_NEED_BUF:
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
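+               /* Both statuses indicate no posted RQ buffer for the frame */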
+               hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
                spin_lock_irqsave(&phba->hbalock, iflags);
                phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
                        workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
                        if (!(++ecount % cq->entry_repost))
                                lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
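+                       /* Count each mailbox completion entry consumed */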
+                       cq->CQ_mbox++;
                }
                break;
        case LPFC_WCQ:
                        if (!(++ecount % cq->entry_repost))
                                lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
                }
+
+               /* Track the max number of CQEs processed in 1 EQ */
+               if (ecount > cq->CQ_max_cqe)
+                       cq->CQ_max_cqe = ecount;
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
        /* Check and process for different type of WCQE and dispatch */
        switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
        case CQE_CODE_COMPL_WQE:
+               cq->CQ_wq++;
                /* Process the WQ complete event */
                phba->last_completion_time = jiffies;
                lpfc_sli4_fp_handle_fcp_wcqe(phba,
                                (struct lpfc_wcqe_complete *)&wcqe);
                break;
        case CQE_CODE_RELEASE_WQE:
+               cq->CQ_release_wqe++;
                /* Process the WQ release event */
                lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
                                (struct lpfc_wcqe_release *)&wcqe);
                break;
        case CQE_CODE_XRI_ABORTED:
+               cq->CQ_xri_aborted++;
                /* Process the WQ XRI abort event */
                phba->last_completion_time = jiffies;
                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
                                (struct lpfc_wcqe_xri_aborted *)&wcqe);
                break;
                        lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
        }
 
+       /* Track the max number of CQEs processed in 1 EQ */
+       if (ecount > cq->CQ_max_cqe)
+               cq->CQ_max_cqe = ecount;
+
        /* Catch the no cq entry condition */
        if (unlikely(ecount == 0))
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 
        /* Check device state for handling interrupt */
        if (unlikely(lpfc_intr_state_check(phba))) {
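+               /* Port state does not allow servicing this interrupt */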
+               speq->EQ_badstate++;
                /* Check again for link_state with lock held */
                spin_lock_irqsave(&phba->hbalock, iflag);
                if (phba->link_state < LPFC_LINK_DOWN)
                lpfc_sli4_sp_handle_eqe(phba, eqe);
                if (!(++ecount % speq->entry_repost))
                        lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
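+               /* Count each slow-path EQE handled in this pass */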
+               speq->EQ_processed++;
        }
 
+       /* Track the max number of EQEs processed in 1 intr */
+       if (ecount > speq->EQ_max_eqe)
+               speq->EQ_max_eqe = ecount;
+
        /* Always clear and re-arm the slow-path EQ */
        lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
 
        /* Catch the no cq entry condition */
        if (unlikely(ecount == 0)) {
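+               /* Interrupt fired but the EQ held no entries */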
+               speq->EQ_no_entry++;
                if (phba->intr_type == MSIX)
                        /* MSI-X treated interrupt served as no EQ share INT */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 
        /* Check device state for handling interrupt */
        if (unlikely(lpfc_intr_state_check(phba))) {
+               fpeq->EQ_badstate++;
                /* Check again for link_state with lock held */
                spin_lock_irqsave(&phba->hbalock, iflag);
                if (phba->link_state < LPFC_LINK_DOWN)
                lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
                if (!(++ecount % fpeq->entry_repost))
                        lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+               fpeq->EQ_processed++;
        }
 
+       /* Track the max number of EQEs processed in 1 intr */
+       if (ecount > fpeq->EQ_max_eqe)
+               fpeq->EQ_max_eqe = ecount;
+
        /* Always clear and re-arm the fast-path EQ */
        lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
 
        if (unlikely(ecount == 0)) {
+               fpeq->EQ_no_entry++;
                if (phba->intr_type == MSIX)
                        /* MSI-X treated interrupt served as no EQ share INT */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,