        struct ib_event event;
        struct iwch_qp_attributes attrs;
        struct iwch_qp *qhp;
+       unsigned long flag;
 
        spin_lock(&rnicp->lock);
        qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
        if (qhp->ibqp.event_handler)
                (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
+       spin_lock_irqsave(&chp->comp_handler_lock, flag);
        (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+       spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);
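
The hunks above appear to come from the QP async-event path in the cxgb3 iwarp driver (post_qp_event() in iwch_ev.c, assuming this is the upstream "serialize calls to CQ's comp_handler" change). The fix is the same everywhere in this patch: the completion upcall (*chp->ibcq.comp_handler)() can be reached concurrently from the event-dispatch path and from the QP flush paths, so each call site is wrapped in a new per-CQ comp_handler_lock, taken with the irqsave variant because the callers run in differing contexts. A minimal userspace sketch of the pattern (all names illustrative, pthread spinlocks standing in for kernel ones; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct cq {
	pthread_spinlock_t comp_handler_lock;	/* serializes the upcall only */
	void (*comp_handler)(struct cq *cq, void *ctx);
	void *cq_context;
};

/* Every path that signals this CQ funnels through here, so the handler
 * never runs against itself concurrently for a given CQ. */
static void signal_completion(struct cq *cq)
{
	pthread_spin_lock(&cq->comp_handler_lock);
	cq->comp_handler(cq, cq->cq_context);
	pthread_spin_unlock(&cq->comp_handler_lock);
}

static void handler(struct cq *cq, void *ctx)
{
	(void)cq;
	printf("completion on %s\n", (const char *)ctx);
}

static void *poke(void *arg)
{
	signal_completion(arg);
	return NULL;
}

int main(void)
{
	struct cq cq = { .comp_handler = handler, .cq_context = "cq0" };
	pthread_t a, b;

	pthread_spin_init(&cq.comp_handler_lock, PTHREAD_PROCESS_PRIVATE);
	/* Two concurrent notifiers; the lock keeps the upcalls serial. */
	pthread_create(&a, NULL, poke, &cq);
	pthread_create(&b, NULL, poke, &cq);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
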
        struct iwch_cq *chp;
        struct iwch_qp *qhp;
        u32 cqid = RSPQ_CQID(rsp_msg);
+       unsigned long flag;
 
        rnicp = (struct iwch_dev *) rdev_p->ulp;
        spin_lock(&rnicp->lock);
                 */
                if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
                        dst_confirm(qhp->ep->dst);
+               spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+               spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
                break;
 
        case TPT_ERR_STAG:
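
The second hunk applies the identical two-line wrapping in the error-event dispatch path (iwch_ev_dispatch(), same assumption about the source file), in what looks like the successful-completion case of its switch on the CQE status; the following case TPT_ERR_STAG: begins the error cases. Note that dst_confirm() runs before the lock is taken, keeping the critical section down to the upcall itself, and the new unsigned long flag local added at the top of the function is the IRQ-state word for the irqsave/irqrestore pair.
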
 
        chp->rhp = rhp;
        chp->ibcq.cqe = 1 << chp->cq.size_log2;
        spin_lock_init(&chp->lock);
+       spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
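
The create-CQ hunk initializes the new lock, and the ordering matters: spin_lock_init() runs before insert_handle() publishes the CQ ID into the device's idr, so no dispatch path can look the CQ up and take a still-uninitialized lock (with lock debugging enabled, the bad-magic check would catch exactly that). Continuing the userspace sketch from above (add <stdlib.h>), the same rule reads:

struct cq *create_cq(void)
{
	struct cq *cq = calloc(1, sizeof(*cq));

	if (cq)
		pthread_spin_init(&cq->comp_handler_lock,
				  PTHREAD_PROCESS_PRIVATE);
	return cq;	/* only now publish it to lookup structures */
}
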
 
        flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, *flag);
-       if (flushed)
+       if (flushed) {
+               spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+               spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+       }
 
        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, *flag);
-       if (flushed)
+       if (flushed) {
+               spin_lock_irqsave(&schp->comp_handler_lock, *flag);
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+               spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+       }
 
        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
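
In the flush path (__flush_qp(), again assuming the upstream patch), the new braces are simply required once the body grows past one statement. More interesting is where the upcall sits: the CQ and QP data locks are released first, per the "locking hierarchy" comments, and only then is comp_handler_lock taken, so the handler is free to poll or re-arm the CQ, which takes the CQ's own lock, without deadlocking against the locks that protect the queues. Reusing *flag for both irqsave regions is fine because they are sequential, never nested. A sketch of that shape, extending the illustrative types with the data locks and a stub flush:

#include <pthread.h>

struct cq {
	pthread_spinlock_t lock;		/* protects CQ contents  */
	pthread_spinlock_t comp_handler_lock;	/* serializes the upcall */
	void (*comp_handler)(struct cq *cq, void *ctx);
	void *cq_context;
};

struct qp {
	pthread_spinlock_t lock;
};

/* Stand-in for cxio_flush_rq()/cxio_flush_sq(): pretend work was flushed. */
static int flush_queue(struct qp *qp, struct cq *cq)
{
	(void)qp; (void)cq;
	return 1;
}

void flush_and_notify(struct cq *cq, struct qp *qp)
{
	int flushed;

	/* locking hierarchy from the patch: cq lock first, then qp lock */
	pthread_spin_lock(&cq->lock);
	pthread_spin_lock(&qp->lock);
	flushed = flush_queue(qp, cq);
	pthread_spin_unlock(&qp->lock);
	pthread_spin_unlock(&cq->lock);

	/* Upcall outside the data locks; comp_handler_lock only stops two
	 * notifiers from running the handler concurrently. */
	if (flushed) {
		pthread_spin_lock(&cq->comp_handler_lock);
		cq->comp_handler(cq, cq->cq_context);
		pthread_spin_unlock(&cq->comp_handler_lock);
	}
}
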
        if (qhp->ibqp.uobject) {
                cxio_set_wq_in_error(&qhp->wq);
                cxio_set_cq_in_error(&rchp->cq);
+               spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+               spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
                if (schp != rchp) {
                        cxio_set_cq_in_error(&schp->cq);
+                       spin_lock_irqsave(&schp->comp_handler_lock, *flag);
                        (*schp->ibcq.comp_handler)(&schp->ibcq,
                                                   schp->ibcq.cq_context);
+                       spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
                }
                return;
        }
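
The last hunk is the userspace-QP branch of the flush routine: for a QP with a uobject the kernel cannot flush the work queues on the application's behalf, so it marks the WQ and CQ(s) in error and fires the completion handlers so the user library wakes up, polls, and observes the flush. Both upcalls are now serialized the same way, and the existing schp != rchp test keeps a QP whose send and receive queues share one CQ from being notified twice. As in the flush path above, the lock/unlock pairs are sequential rather than nested, so reusing *flag is safe here too.
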