        u32 cq_handle;
        bool is_kernel;
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
struct pvrdma_srq {
        u32 srq_handle;
        int npages;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_qp {
        bool is_kernel;
        struct mutex mutex; /* QP state mutex. */
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
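
All three objects pair an atomic reference count with a destroy-time wait, and the patch gives each the same treatment: the open-coded wait_queue_head_t is replaced by a struct completion, so whichever path drops the last reference signals the destroyer directly. A minimal sketch of the before/after shape, using a hypothetical obj rather than any of the driver's types:

	/* Before: the destroyer polls the refcount through a wait queue. */
	atomic_dec(&obj->refcnt);
	wait_event(obj->wait, !atomic_read(&obj->refcnt));

	/* After: the final put fires the completion the destroyer blocks on. */
	if (atomic_dec_and_test(&obj->refcnt))
		complete(&obj->free);
	wait_for_completion(&obj->free);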
 
 struct pvrdma_dev {
 
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
        atomic_set(&cq->refcnt, 1);
-       init_waitqueue_head(&cq->wait);
+       init_completion(&cq->free);
        spin_lock_init(&cq->cq_lock);
 
        memset(cmd, 0, sizeof(*cmd));
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-       atomic_dec(&cq->refcnt);
-       wait_event(cq->wait, !atomic_read(&cq->refcnt));
+       if (atomic_dec_and_test(&cq->refcnt))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
 
        if (!cq->is_kernel)
                ib_umem_release(cq->umem);
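
pvrdma_free_cq() drops the reference taken at creation and then waits out any event or interrupt handler still holding the CQ. The completion also closes a use-after-free window in the removed code, where wait_event()'s fast-path condition check could run between another path's final atomic_dec() and its wake_up(). A hypothetical interleaving the old code permitted:

	interrupt path                      pvrdma_free_cq()
	--------------                      ----------------
	                                    atomic_dec(&cq->refcnt);  /* 2 -> 1 */
	atomic_dec(&cq->refcnt);
	  /* refcnt now 0 */
	                                    wait_event() sees refcnt == 0
	                                    and returns without sleeping;
	                                    the CQ is then freed
	wake_up(&cq->wait);
	  /* touches freed memory */

complete() does all of its work under the completion's internal lock, which wait_for_completion() must also take, so once the wait returns the signalling side is finished and teardown can proceed safely. The QP and SRQ destroy paths below follow the same pattern.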
 
                ibqp->event_handler(&e, ibqp->qp_context);
        }
        if (qp) {
-               atomic_dec(&qp->refcnt);
-               if (atomic_read(&qp->refcnt) == 0)
-                       wake_up(&qp->wait);
+               if (atomic_dec_and_test(&qp->refcnt))
+                       complete(&qp->free);
        }
 }
 
                ibcq->event_handler(&e, ibcq->cq_context);
        }
        if (cq) {
-               atomic_dec(&cq->refcnt);
-               if (atomic_read(&cq->refcnt) == 0)
-                       wake_up(&cq->wait);
+               if (atomic_dec_and_test(&cq->refcnt))
+                       complete(&cq->free);
        }
 }
 
        }
        if (srq) {
                if (refcount_dec_and_test(&srq->refcnt))
-                       wake_up(&srq->wait);
+                       complete(&srq->free);
        }
 }
 
                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                if (cq) {
-                       atomic_dec(&cq->refcnt);
-                       if (atomic_read(&cq->refcnt))
-                               wake_up(&cq->wait);
+                       if (atomic_dec_and_test(&cq->refcnt))
+                               complete(&cq->free);
                }
                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }
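
Note the condition in the removed lines of this hunk: unlike the event handlers above, it tested atomic_read(&cq->refcnt) without the == 0, so this interrupt path issued wake_up() whenever references remained and skipped it on the final put, which is exactly the case pvrdma_free_cq() waits for. Switching to atomic_dec_and_test()/complete() removes the hand-rolled condition, and this class of inverted test with it.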
 
                spin_lock_init(&qp->rq.lock);
                mutex_init(&qp->mutex);
                atomic_set(&qp->refcnt, 1);
-               init_waitqueue_head(&qp->wait);
+               init_completion(&qp->free);
 
                qp->state = IB_QPS_RESET;
 
 
        pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-       atomic_dec(&qp->refcnt);
-       wait_event(qp->wait, !atomic_read(&qp->refcnt));
+       if (atomic_dec_and_test(&qp->refcnt))
+               complete(&qp->free);
+       wait_for_completion(&qp->free);
 
        if (!qp->is_kernel) {
                if (qp->rumem)
 
 
        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
-       init_waitqueue_head(&srq->wait);
+       init_completion(&srq->free);
 
        dev_dbg(&dev->pdev->dev,
                "create shared receive queue from user space\n");
        dev->srq_tbl[srq->srq_handle] = NULL;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-       if (!refcount_dec_and_test(&srq->refcnt))
-               wait_event(srq->wait, !refcount_read(&srq->refcnt));
+       if (refcount_dec_and_test(&srq->refcnt))
+               complete(&srq->free);
+       wait_for_completion(&srq->free);
 
        /* There is no support for kernel clients, so this is safe. */
        ib_umem_release(srq->umem);
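
The SRQ already used refcount_t, so only the signalling mechanism changes here. One nuance: the removed code skipped the wait entirely when refcount_dec_and_test() showed the destroyer held the last reference, whereas the new code always calls wait_for_completion(). That stays correct and cheap: if this path drops the final reference it has already called complete() itself and the wait returns immediately; otherwise it blocks until the remaining holder signals.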