qp->s_tail_ack_queue = 0;
        qp->s_acked_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
-       if (qp->r_rq.kwq)
-               qp->r_rq.kwq->count = qp->r_rq.size;
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
 }
        return 0;
 }
 
-/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
-       u32 count;
-
-       count = head;
-
-       if (count >= rq->size)
-               count = 0;
-       if (count < tail)
-               count += rq->size - tail;
-       else
-               count -= tail;
-
-       return count;
-}
-
 /**
  * get_rvt_head - get head indices of the circular buffer
  * @rq: data structure for request queue entry
 
        if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
                head = get_rvt_head(rq, ip);
-               kwq->count = get_count(rq, tail, head);
+               kwq->count = rvt_get_rq_count(rq, head, tail);
        }
        if (unlikely(kwq->count == 0)) {
                ret = 0;
                 * the number of remaining WQEs.
                 */
                if (kwq->count < srq->limit) {
-                       kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+                       kwq->count =
+                               rvt_get_rq_count(rq,
+                                                get_rvt_head(rq, ip), tail);
                        if (kwq->count < srq->limit) {
                                struct ib_event ev;
 
 
                         * not atomic, which is OK, since the fuzziness is
                         * resolved as further ACKs go out.
                         */
-                       credits = head - tail;
-                       if ((int)credits < 0)
-                               credits += qp->r_rq.size;
+                       credits = rvt_get_rq_count(&qp->r_rq, head, tail);
                }
                /*
                 * Binary search the credit table to find the code to
 
        spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
+/**
+ * rvt_get_rq_count - count the number of request work queue entries
+ * currently held in the circular buffer
+ * @rq: data structure for request queue entry
+ * @head: head index of the circular buffer
+ * @tail: tail index of the circular buffer
+ *
+ * Return: total number of entries in the receive queue
+ */
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+	u32 count = head - tail;
+
+	if ((s32)count < 0)
+		count += rq->size;
+	return count;
+}
+
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.