        dev->dsr->resp_slot_dma = (u64)slot_dma;
 
        /* Async event ring */
-       dev->dsr->async_ring_pages.num_pages = 4;
+       dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
                                   dev->dsr->async_ring_pages.num_pages, true);
        if (ret)
        dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
 
        /* CQ notification ring */
-       dev->dsr->cq_ring_pages.num_pages = 4;
+       dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
                                   dev->dsr->cq_ring_pages.num_pages, true);
        if (ret)
 
        return ret;
 }
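
The two hunks above replace the magic page count 4 with PVRDMA_NUM_RING_PAGES; the page count is what ultimately bounds how many entries a shared ring can hold, so it pays to derive that in one place. The stand-alone sketch below (all names and values are invented for illustration, nothing here is taken from the driver) shows the kind of arithmetic such a constant feeds into.

#include <stdio.h>

/* Illustrative values only; the real driver gets these elsewhere. */
#define EXAMPLE_PAGE_SIZE       4096u
#define EXAMPLE_NUM_RING_PAGES  4u      /* plays the role of PVRDMA_NUM_RING_PAGES */

struct example_ring {
	unsigned int num_pages;  /* pages backing the ring buffer */
	unsigned int elem_size;  /* bytes per ring element */
};

/* How many elements fit into the pages backing this ring. */
static unsigned int example_ring_capacity(const struct example_ring *ring)
{
	return (ring->num_pages * EXAMPLE_PAGE_SIZE) / ring->elem_size;
}

int main(void)
{
	struct example_ring async_ring = {
		.num_pages = EXAMPLE_NUM_RING_PAGES,
		.elem_size = 64u,        /* made-up element size */
	};

	printf("async ring holds %u elements\n",
	       example_ring_capacity(&async_ring));
	return 0;
}
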
 
-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
        return pvrdma_page_dir_get_ptr(&qp->pdir,
                                       qp->sq.offset + n * qp->sq.wqe_size);
 }
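
get_sq_wqe() above, and its receive-side twin get_rq_wqe() just below, are pure address arithmetic: slot n lives at the queue's byte offset plus n * wqe_size inside the QP's page directory. The switch from int to unsigned int for n matches the unsigned tail index the ring helpers hand back later in this patch. A minimal user-space sketch of the same idea, using a plain contiguous buffer and invented names in place of pvrdma_page_dir_get_ptr():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy queue: a contiguous buffer holding fixed-size WQE slots. */
struct toy_queue {
	uint8_t      *buf;       /* backing memory (the driver uses a page directory) */
	size_t        offset;    /* byte offset of this queue within the buffer */
	size_t        wqe_size;  /* bytes per work-queue element */
	unsigned int  wqe_cnt;   /* number of slots */
};

/* Same shape as get_sq_wqe()/get_rq_wqe(): slot n = offset + n * wqe_size. */
static void *toy_get_wqe(const struct toy_queue *q, unsigned int n)
{
	return q->buf + q->offset + (size_t)n * q->wqe_size;
}

int main(void)
{
	struct toy_queue sq = {
		.offset = 0, .wqe_size = 128, .wqe_cnt = 8,
	};

	sq.buf = calloc(sq.wqe_cnt, sq.wqe_size);
	if (!sq.buf)
		return 1;

	/* Slot 3 starts 3 * 128 bytes past the queue offset. */
	printf("slot 3 at byte offset %zu\n",
	       (size_t)((uint8_t *)toy_get_wqe(&sq, 3) - sq.buf));

	free(sq.buf);
	return 0;
}
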
 
-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
        return pvrdma_page_dir_get_ptr(&qp->pdir,
                                       qp->rq.offset + n * qp->rq.wqe_size);
 }
        unsigned long flags;
        struct pvrdma_sq_wqe_hdr *wqe_hdr;
        struct pvrdma_sge *sge;
-       int i, index;
-       int nreq;
-       int ret;
+       int i, ret;
 
        /*
         * In states lower than RTS, we can fail immediately. In other states,
         * just post and let the device figure it out.
         */
 
        spin_lock_irqsave(&qp->sq.lock, flags);
 
-       index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
-       for (nreq = 0; wr; nreq++, wr = wr->next) {
-               unsigned int tail;
+       while (wr) {
+               unsigned int tail = 0;
 
                if (unlikely(!pvrdma_idx_ring_has_space(
                                qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
                        }
                }
 
-               wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+               wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
                memset(wqe_hdr, 0, sizeof(*wqe_hdr));
                wqe_hdr->wr_id = wr->wr_id;
                wqe_hdr->num_sge = wr->num_sge;
                /* Make sure wqe is written before index update */
                smp_wmb();
 
-               index++;
-               if (unlikely(index >= qp->sq.wqe_cnt))
-                       index = 0;
                /* Update shared sq ring */
                pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
                                    qp->sq.wqe_cnt);
+
+               wr = wr->next;
        }
 
        ret = 0;
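
The rewritten posting loop drops the locally maintained index and takes the slot to fill (tail) straight from the ring-space check, so there is a single source of truth for the producer position and no duplicate wrap-around handling. Below is a self-contained sketch of that pattern with invented names standing in for pvrdma_idx_ring_has_space() and pvrdma_idx_ring_inc(); it uses a simplified free-running-counter index, not the driver's actual index encoding, and reduces the memory barrier to a comment.

#include <stdbool.h>
#include <stdio.h>

#define TOY_RING_SLOTS 8u

struct toy_ring_state {
	unsigned int prod_tail;  /* next slot the producer will fill */
	unsigned int cons_head;  /* next slot the consumer will read */
};

/*
 * If the ring has room, report the slot the producer should write and
 * return true; this mirrors the role pvrdma_idx_ring_has_space() plays above.
 */
static bool toy_ring_has_space(const struct toy_ring_state *r,
			       unsigned int cnt, unsigned int *tail)
{
	if (r->prod_tail - r->cons_head >= cnt)
		return false;            /* full */
	*tail = r->prod_tail % cnt;      /* slot index to fill */
	return true;
}

/* Publish one produced entry (the pvrdma_idx_ring_inc() analogue). */
static void toy_ring_inc_prod(struct toy_ring_state *r)
{
	/* In the driver an smp_wmb() orders the WQE write before this. */
	r->prod_tail++;
}

int main(void)
{
	struct toy_ring_state ring = { 0, 0 };
	unsigned int wqe[TOY_RING_SLOTS];
	unsigned int tail = 0;

	for (unsigned int wr = 0; wr < 5; wr++) {
		if (!toy_ring_has_space(&ring, TOY_RING_SLOTS, &tail))
			break;           /* ring full: fail the post */
		wqe[tail] = wr;          /* "write the WQE" into slot tail */
		toy_ring_inc_prod(&ring);
	}

	printf("posted up to slot %u, prod_tail=%u\n", tail, ring.prod_tail);
	return 0;
}
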
        struct pvrdma_qp *qp = to_vqp(ibqp);
        struct pvrdma_rq_wqe_hdr *wqe_hdr;
        struct pvrdma_sge *sge;
-       int index, nreq;
        int ret = 0;
        int i;
 
 
        spin_lock_irqsave(&qp->rq.lock, flags);
 
-       index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
-       for (nreq = 0; wr; nreq++, wr = wr->next) {
-               unsigned int tail;
+       while (wr) {
+               unsigned int tail = 0;
 
                if (unlikely(wr->num_sge > qp->rq.max_sg ||
                             wr->num_sge < 0)) {
                        goto out;
                }
 
-               wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+               wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
                wqe_hdr->wr_id = wr->wr_id;
                wqe_hdr->num_sge = wr->num_sge;
                wqe_hdr->total_len = 0;
                /* Make sure wqe is written before index update */
                smp_wmb();
 
-               index++;
-               if (unlikely(index >= qp->rq.wqe_cnt))
-                       index = 0;
                /* Update shared rq ring */
                pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
                                    qp->rq.wqe_cnt);
+
+               wr = wr->next;
        }
 
        spin_unlock_irqrestore(&qp->rq.lock, flags);
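
The receive path above applies the same change as the send path: the slot comes from the space check and the shared prod_tail is the only producer index. For completeness, here is the consumer half of the same toy ring from the earlier sketch, showing why the producer never needs a private index of its own. This is purely illustrative and not taken from the driver, where consumption happens on the device side and completions flow back through the CQ rings.

#include <stdbool.h>
#include <stdio.h>

#define TOY_RING_SLOTS 8u

struct toy_ring_state {
	unsigned int prod_tail;  /* next slot the producer will fill */
	unsigned int cons_head;  /* next slot the consumer will read */
};

/* If an entry is available, report which slot to read and return true. */
static bool toy_ring_has_data(const struct toy_ring_state *r,
			      unsigned int cnt, unsigned int *head)
{
	if (r->prod_tail == r->cons_head)
		return false;            /* empty */
	*head = r->cons_head % cnt;
	return true;
}

int main(void)
{
	/* Pretend the producer already published three entries. */
	struct toy_ring_state ring = { .prod_tail = 3, .cons_head = 0 };
	unsigned int head;

	while (toy_ring_has_data(&ring, TOY_RING_SLOTS, &head)) {
		printf("consuming slot %u\n", head);
		ring.cons_head++;        /* hand the slot back to the producer */
	}
	return 0;
}
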