/* we come here whether or not we found a response packet to see if
         * there are any posted WQEs
         */
-       if (qp->is_user)
-               wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_USER);
-       else
-               wqe = queue_head(qp->sq.queue, QUEUE_TYPE_KERNEL);
+       wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
        *wqe_p = wqe;
 
        /* no WQE or requester has not started it yet */
        if (post)
                make_send_cqe(qp, wqe, &cqe);
 
-       if (qp->is_user)
-               advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_USER);
-       else
-               advance_consumer(qp->sq.queue, QUEUE_TYPE_KERNEL);
+       queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
 
        if (post)
                rxe_cq_post(qp->scq, &cqe, 0);
                        wqe->status = IB_WC_WR_FLUSH_ERR;
                        do_complete(qp, wqe);
                } else {
-                       advance_consumer(q, q->type);
+                       queue_advance_consumer(q, q->type);
                }
        }
 }
 
        }
 
        if (cq) {
-               if (cq->is_user)
-                       count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
-               else
-                       count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
-
+               count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
                if (cqe < count) {
                        pr_warn("cqe(%d) < current # elements in queue (%d)",
                                cqe, count);
        int err;
        enum queue_type type;
 
-       type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
+       type = QUEUE_TYPE_TO_CLIENT;
        cq->queue = rxe_queue_init(rxe, &cqe,
                        sizeof(struct rxe_cqe), type);
        if (!cq->queue) {
 
        spin_lock_irqsave(&cq->cq_lock, flags);
 
-       if (cq->is_user)
-               full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
-       else
-               full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
-
+       full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
        if (unlikely(full)) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                if (cq->ibcq.event_handler) {
                return -EBUSY;
        }
 
-       if (cq->is_user)
-               addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
-       else
-               addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
-
+       addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
        memcpy(addr, cqe, sizeof(*cqe));
 
-       if (cq->is_user)
-               advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
-       else
-               advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
+       queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
 
        spin_unlock_irqrestore(&cq->cq_lock, flags);
 
 
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);
 
-       type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+       type = QUEUE_TYPE_FROM_CLIENT;
        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
                                wqe_size, type);
        if (!qp->sq.queue)
                return err;
        }
 
-       if (qp->is_user)
-               qp->req.wqe_index = producer_index(qp->sq.queue,
-                                               QUEUE_TYPE_FROM_USER);
-       else
-               qp->req.wqe_index = producer_index(qp->sq.queue,
-                                               QUEUE_TYPE_KERNEL);
+       qp->req.wqe_index = queue_get_producer(qp->sq.queue,
+                                              QUEUE_TYPE_FROM_CLIENT);
 
        qp->req.state           = QP_STATE_RESET;
        qp->req.opcode          = -1;
                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
-               type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+               type = QUEUE_TYPE_FROM_CLIENT;
                qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
                                        wqe_size, type);
                if (!qp->rq.queue)
 
 static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
                         unsigned int num_elem)
 {
-       if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type)))
+       enum queue_type type = q->type;
+       u32 prod;
+       u32 cons;
+
+       if (!queue_empty(q, type) && (num_elem < queue_count(q, type)))
                return -EINVAL;
 
-       while (!queue_empty(q, q->type)) {
-               memcpy(producer_addr(new_q, new_q->type),
-                                       consumer_addr(q, q->type),
-                                       new_q->elem_size);
-               advance_producer(new_q, new_q->type);
-               advance_consumer(q, q->type);
+       prod = queue_get_producer(new_q, type);
+       cons = queue_get_consumer(q, type);
+
+       while (!queue_empty(q, type)) {
+               memcpy(queue_addr_from_index(new_q, prod),
+                      queue_addr_from_index(q, cons), new_q->elem_size);
+               prod = queue_next_index(new_q, prod);
+               cons = queue_next_index(q, cons);
        }
 
+       new_q->buf->producer_index = prod;
+       q->buf->consumer_index = cons;
+
+       /* update private index copies */
+       if (type == QUEUE_TYPE_TO_CLIENT)
+               new_q->index = new_q->buf->producer_index;
+       else
+               q->index = q->buf->consumer_index;
+
+       /* exchange rxe_queue headers */
        swap(*q, *new_q);
 
        return 0;
 
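The resize path above drains the old ring using local copies of the
producer and consumer indices and publishes both once after the loop.
A minimal user-space sketch of that copy pattern follows; the toy
types and names are illustrative, not part of the driver:

#include <stdio.h>

/* toy_ring: illustrative stand-in for struct rxe_queue; mask is
 * num_slots - 1 with num_slots a power of 2
 */
struct toy_ring {
	unsigned int prod;
	unsigned int cons;
	unsigned int mask;
	int elem[8];
};

/* copy every live element from oldq to newq using private index
 * copies, then publish both indices once, as resize_finish() does
 */
static void toy_resize_copy(struct toy_ring *oldq, struct toy_ring *newq)
{
	unsigned int prod = newq->prod;		/* local copies */
	unsigned int cons = oldq->cons;

	while (((oldq->prod - cons) & oldq->mask) != 0) {	/* !empty */
		newq->elem[prod & newq->mask] = oldq->elem[cons & oldq->mask];
		prod = (prod + 1) & newq->mask;
		cons = (cons + 1) & oldq->mask;
	}

	newq->prod = prod;	/* publish once, after the copy */
	oldq->cons = cons;
}

int main(void)
{
	struct toy_ring oldq = { .prod = 3, .cons = 0, .mask = 3,
				 .elem = { 10, 11, 12 } };
	struct toy_ring newq = { .prod = 0, .cons = 0, .mask = 7 };

	toy_resize_copy(&oldq, &newq);
	printf("moved %u elements, first = %d\n", newq.prod, newq.elem[0]);
	return 0;
}
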
 /* for definition of shared struct rxe_queue_buf */
 #include <uapi/rdma/rdma_user_rxe.h>
 
-/* implements a simple circular buffer that can optionally be
- * shared between user space and the kernel and can be resized
- * the requested element size is rounded up to a power of 2
- * and the number of elements in the buffer is also rounded
- * up to a power of 2. Since the queue is empty when the
- * producer and consumer indices match the maximum capacity
- * of the queue is one less than the number of element slots
+/* Implements a simple circular buffer that is shared between the
+ * user and the driver and can be resized. The requested element size
+ * is rounded up to a power of 2 and the number of elements in the
+ * buffer is also rounded up to a power of 2. Since the queue is
+ * empty when the producer and consumer indices match, the maximum
+ * capacity of the queue is one less than the number of element slots.
  *
  * Notes:
- *   - Kernel space indices are always masked off to q->index_mask
- *   before storing so do not need to be checked on reads.
- *   - User space indices may be out of range and must be
- *   masked before use when read.
- *   - The kernel indices for shared queues must not be written
- *   by user space so a local copy is used and a shared copy is
- *   stored when the local copy changes.
+ *   - The driver indices are always masked with q->index_mask
+ *     before storing, so they do not need to be checked on reads.
+ *   - The user, whether user space or a kernel client, is generally
+ *     not trusted, so its parameters are masked to make sure
+ *     they do not access the queue out of bounds on reads.
+ *   - The driver indices for queues must not be written by the
+ *     user, so a local copy is used and the shared copy is
+ *     updated when the local copy changes.
  *   - By passing the type in the parameter list separate from q
- *   the compiler can eliminate the switch statement when the
- *   actual queue type is known when the function is called.
- *   In the performance path this is done. In less critical
- *   paths just q->type is passed.
+ *     the compiler can eliminate the switch statement when the
+ *     actual queue type is known at compile time.
+ *   - These queues are lock-free. The user and the driver must
+ *     each protect changes to their end of a queue with a lock
+ *     if more than one CPU can be accessing it at the same time.
  */
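
To make the arithmetic above concrete: the indices are never compared
directly, only their masked difference, so an 8-slot ring holds at
most 7 elements. A minimal user-space sketch (toy names, not driver
code):

#include <stdio.h>

struct toy_ring {
	unsigned int prod;		/* producer index */
	unsigned int cons;		/* consumer index */
	unsigned int index_mask;	/* num_slots - 1, num_slots a power of 2 */
};

static int toy_empty(const struct toy_ring *q)
{
	return ((q->prod - q->cons) & q->index_mask) == 0;
}

static int toy_full(const struct toy_ring *q)
{
	return ((q->prod + 1 - q->cons) & q->index_mask) == 0;
}

static unsigned int toy_count(const struct toy_ring *q)
{
	return (q->prod - q->cons) & q->index_mask;
}

int main(void)
{
	struct toy_ring q = { .prod = 0, .cons = 0, .index_mask = 8 - 1 };

	while (!toy_full(&q))	/* fills 7 of the 8 slots, then stops */
		q.prod = (q.prod + 1) & q.index_mask;

	printf("count = %u, empty = %d, full = %d\n",
	       toy_count(&q), toy_empty(&q), toy_full(&q));
	return 0;
}

This prints "count = 7, empty = 0, full = 1": one slot is sacrificed
so that prod == cons unambiguously means "empty".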
 
-/* type of queue */
+/**
+ * enum queue_type - type of queue
+ * @QUEUE_TYPE_TO_CLIENT:      Queue is written by the rxe driver and
+ *                             read by the client. Used by the rxe
+ *                             driver only.
+ * @QUEUE_TYPE_FROM_CLIENT:    Queue is written by the client and read
+ *                             by the rxe driver. Used by the rxe
+ *                             driver only.
+ * @QUEUE_TYPE_TO_DRIVER:      Queue is written by the client and read
+ *                             by the rxe driver. Used by a kernel
+ *                             client only.
+ * @QUEUE_TYPE_FROM_DRIVER:    Queue is written by the rxe driver and
+ *                             read by the client. Used by a kernel
+ *                             client only.
+ */
 enum queue_type {
-       QUEUE_TYPE_KERNEL,
-       QUEUE_TYPE_TO_USER,
-       QUEUE_TYPE_FROM_USER,
+       QUEUE_TYPE_TO_CLIENT,
+       QUEUE_TYPE_FROM_CLIENT,
+       QUEUE_TYPE_TO_DRIVER,
+       QUEUE_TYPE_FROM_DRIVER,
 };
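
The four values deliberately name the same ring differently on each
side: a "TO_x" type means the caller produces into the queue and owns
the producer index, while a "FROM_x" type means the caller consumes
and owns the consumer index; only the owned index may be advanced,
which is what queue_advance_producer()/queue_advance_consumer() below
warn about. A hedged toy sketch of that rule (names are ours, not the
driver's):

#include <stdbool.h>
#include <stdio.h>

/* mirror of the four queue types from the patch */
enum queue_type {
	QUEUE_TYPE_TO_CLIENT,
	QUEUE_TYPE_FROM_CLIENT,
	QUEUE_TYPE_TO_DRIVER,
	QUEUE_TYPE_FROM_DRIVER,
};

/* owns_producer: true if the side that passed this type is the
 * producer and so may advance the producer index
 */
static bool owns_producer(enum queue_type type)
{
	/* "TO_x" means the caller writes toward x, i.e. produces */
	return type == QUEUE_TYPE_TO_CLIENT || type == QUEUE_TYPE_TO_DRIVER;
}

int main(void)
{
	/* one SQ, two views: the rxe driver consumes WQEs that the
	 * client produced, so the same ring is FROM_CLIENT to the
	 * driver and TO_DRIVER to a kernel client
	 */
	printf("driver side (FROM_CLIENT) produces: %d\n",
	       owns_producer(QUEUE_TYPE_FROM_CLIENT));	/* 0 */
	printf("client side (TO_DRIVER) produces: %d\n",
	       owns_producer(QUEUE_TYPE_TO_DRIVER));	/* 1 */
	return 0;
}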
 
 struct rxe_queue {
 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
                     unsigned int elem_size, struct ib_udata *udata,
                     struct mminfo __user *outbuf,
-                    /* Protect producers while resizing queue */
-                    spinlock_t *producer_lock,
-                    /* Protect consumers while resizing queue */
-                    spinlock_t *consumer_lock);
+                    spinlock_t *producer_lock, spinlock_t *consumer_lock);
 
 void rxe_queue_cleanup(struct rxe_queue *queue);
 
-static inline int next_index(struct rxe_queue *q, int index)
+static inline u32 queue_next_index(struct rxe_queue *q, int index)
 {
-       return (index + 1) & q->buf->index_mask;
+       return (index + 1) & q->index_mask;
 }
 
-static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_get_producer(const struct rxe_queue *q,
+                                    enum queue_type type)
 {
        u32 prod;
-       u32 cons;
 
        switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               /* protect user space index */
+       case QUEUE_TYPE_FROM_CLIENT:
+               /* protect user index */
                prod = smp_load_acquire(&q->buf->producer_index);
-               cons = q->index;
                break;
-       case QUEUE_TYPE_TO_USER:
+       case QUEUE_TYPE_TO_CLIENT:
                prod = q->index;
-               /* protect user space index */
-               cons = smp_load_acquire(&q->buf->consumer_index);
                break;
-       case QUEUE_TYPE_KERNEL:
+       case QUEUE_TYPE_FROM_DRIVER:
+               /* protect driver index */
+               prod = smp_load_acquire(&q->buf->producer_index);
+               break;
+       case QUEUE_TYPE_TO_DRIVER:
                prod = q->buf->producer_index;
-               cons = q->buf->consumer_index;
                break;
        }
 
-       return ((prod - cons) & q->index_mask) == 0;
+       return prod;
 }
 
-static inline int queue_full(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_get_consumer(const struct rxe_queue *q,
+                                    enum queue_type type)
 {
-       u32 prod;
        u32 cons;
 
        switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               /* protect user space index */
-               prod = smp_load_acquire(&q->buf->producer_index);
+       case QUEUE_TYPE_FROM_CLIENT:
                cons = q->index;
                break;
-       case QUEUE_TYPE_TO_USER:
-               prod = q->index;
-               /* protect user space index */
+       case QUEUE_TYPE_TO_CLIENT:
+               /* protect user index */
                cons = smp_load_acquire(&q->buf->consumer_index);
                break;
-       case QUEUE_TYPE_KERNEL:
-               prod = q->buf->producer_index;
+       case QUEUE_TYPE_FROM_DRIVER:
                cons = q->buf->consumer_index;
                break;
+       case QUEUE_TYPE_TO_DRIVER:
+               /* protect driver index */
+               cons = smp_load_acquire(&q->buf->consumer_index);
+               break;
        }
 
-       return ((prod + 1 - cons) & q->index_mask) == 0;
+       return cons;
 }
 
-static inline unsigned int queue_count(const struct rxe_queue *q,
-                                       enum queue_type type)
+static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
 {
-       u32 prod;
-       u32 cons;
-
-       switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               /* protect user space index */
-               prod = smp_load_acquire(&q->buf->producer_index);
-               cons = q->index;
-               break;
-       case QUEUE_TYPE_TO_USER:
-               prod = q->index;
-               /* protect user space index */
-               cons = smp_load_acquire(&q->buf->consumer_index);
-               break;
-       case QUEUE_TYPE_KERNEL:
-               prod = q->buf->producer_index;
-               cons = q->buf->consumer_index;
-               break;
-       }
+       u32 prod = queue_get_producer(q, type);
+       u32 cons = queue_get_consumer(q, type);
 
-       return (prod - cons) & q->index_mask;
+       return ((prod - cons) & q->index_mask) == 0;
 }
 
-static inline void advance_producer(struct rxe_queue *q, enum queue_type type)
+static inline int queue_full(struct rxe_queue *q, enum queue_type type)
 {
-       u32 prod;
+       u32 prod = queue_get_producer(q, type);
+       u32 cons = queue_get_consumer(q, type);
 
-       switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               pr_warn_once("Normally kernel should not write user space index\n");
-               /* protect user space index */
-               prod = smp_load_acquire(&q->buf->producer_index);
-               prod = (prod + 1) & q->index_mask;
-               /* same */
-               smp_store_release(&q->buf->producer_index, prod);
-               break;
-       case QUEUE_TYPE_TO_USER:
-               prod = q->index;
-               q->index = (prod + 1) & q->index_mask;
-               q->buf->producer_index = q->index;
-               break;
-       case QUEUE_TYPE_KERNEL:
-               prod = q->buf->producer_index;
-               q->buf->producer_index = (prod + 1) & q->index_mask;
-               break;
-       }
+       return ((prod + 1 - cons) & q->index_mask) == 0;
 }
 
-static inline void advance_consumer(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_count(const struct rxe_queue *q,
+                              enum queue_type type)
 {
-       u32 cons;
+       u32 prod = queue_get_producer(q, type);
+       u32 cons = queue_get_consumer(q, type);
 
-       switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               cons = q->index;
-               q->index = (cons + 1) & q->index_mask;
-               q->buf->consumer_index = q->index;
-               break;
-       case QUEUE_TYPE_TO_USER:
-               pr_warn_once("Normally kernel should not write user space index\n");
-               /* protect user space index */
-               cons = smp_load_acquire(&q->buf->consumer_index);
-               cons = (cons + 1) & q->index_mask;
-               /* same */
-               smp_store_release(&q->buf->consumer_index, cons);
-               break;
-       case QUEUE_TYPE_KERNEL:
-               cons = q->buf->consumer_index;
-               q->buf->consumer_index = (cons + 1) & q->index_mask;
-               break;
-       }
+       return (prod - cons) & q->index_mask;
 }
 
-static inline void *producer_addr(struct rxe_queue *q, enum queue_type type)
+static inline void queue_advance_producer(struct rxe_queue *q,
+                                         enum queue_type type)
 {
        u32 prod;
 
        switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               /* protect user space index */
-               prod = smp_load_acquire(&q->buf->producer_index);
-               prod &= q->index_mask;
+       case QUEUE_TYPE_FROM_CLIENT:
+               pr_warn("%s: attempt to advance client index\n", __func__);
                break;
-       case QUEUE_TYPE_TO_USER:
+       case QUEUE_TYPE_TO_CLIENT:
                prod = q->index;
+               prod = (prod + 1) & q->index_mask;
+               q->index = prod;
+               /* protect user index */
+               smp_store_release(&q->buf->producer_index, prod);
+               break;
+       case QUEUE_TYPE_FROM_DRIVER:
+               pr_warn("%s: attempt to advance driver index\n", __func__);
                break;
-       case QUEUE_TYPE_KERNEL:
+       case QUEUE_TYPE_TO_DRIVER:
                prod = q->buf->producer_index;
+               prod = (prod + 1) & q->index_mask;
+               q->buf->producer_index = prod;
                break;
        }
-
-       return q->buf->data + (prod << q->log2_elem_size);
 }
 
-static inline void *consumer_addr(struct rxe_queue *q, enum queue_type type)
+static inline void queue_advance_consumer(struct rxe_queue *q,
+                                         enum queue_type type)
 {
        u32 cons;
 
        switch (type) {
-       case QUEUE_TYPE_FROM_USER:
+       case QUEUE_TYPE_FROM_CLIENT:
                cons = q->index;
+               cons = (cons + 1) & q->index_mask;
+               q->index = cons;
+               /* protect user index */
+               smp_store_release(&q->buf->consumer_index, cons);
                break;
-       case QUEUE_TYPE_TO_USER:
-               /* protect user space index */
-               cons = smp_load_acquire(&q->buf->consumer_index);
-               cons &= q->index_mask;
+       case QUEUE_TYPE_TO_CLIENT:
+               pr_warn("%s: attempt to advance client index\n", __func__);
                break;
-       case QUEUE_TYPE_KERNEL:
+       case QUEUE_TYPE_FROM_DRIVER:
                cons = q->buf->consumer_index;
+               cons = (cons + 1) & q->index_mask;
+               q->buf->consumer_index = cons;
+               break;
+       case QUEUE_TYPE_TO_DRIVER:
+               pr_warn("%s: attempt to advance driver index\n", __func__);
                break;
        }
-
-       return q->buf->data + (cons << q->log2_elem_size);
 }
 
-static inline unsigned int producer_index(struct rxe_queue *q,
-                                               enum queue_type type)
+static inline void *queue_producer_addr(struct rxe_queue *q,
+                                       enum queue_type type)
 {
-       u32 prod;
+       u32 prod = queue_get_producer(q, type);
 
-       switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               /* protect user space index */
-               prod = smp_load_acquire(&q->buf->producer_index);
-               prod &= q->index_mask;
-               break;
-       case QUEUE_TYPE_TO_USER:
-               prod = q->index;
-               break;
-       case QUEUE_TYPE_KERNEL:
-               prod = q->buf->producer_index;
-               break;
-       }
-
-       return prod;
+       return q->buf->data + (prod << q->log2_elem_size);
 }
 
-static inline unsigned int consumer_index(struct rxe_queue *q,
-                                               enum queue_type type)
+static inline void *queue_consumer_addr(struct rxe_queue *q,
+                                       enum queue_type type)
 {
-       u32 cons;
-
-       switch (type) {
-       case QUEUE_TYPE_FROM_USER:
-               cons = q->index;
-               break;
-       case QUEUE_TYPE_TO_USER:
-               /* protect user space index */
-               cons = smp_load_acquire(&q->buf->consumer_index);
-               cons &= q->index_mask;
-               break;
-       case QUEUE_TYPE_KERNEL:
-               cons = q->buf->consumer_index;
-               break;
-       }
+       u32 cons = queue_get_consumer(q, type);
 
-       return cons;
+       return q->buf->data + (cons << q->log2_elem_size);
 }
 
-static inline void *addr_from_index(struct rxe_queue *q,
-                               unsigned int index)
+static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
 {
        return q->buf->data + ((index & q->index_mask)
-                               << q->buf->log2_elem_size);
+                               << q->log2_elem_size);
 }
 
-static inline unsigned int index_from_addr(const struct rxe_queue *q,
+static inline u32 queue_index_from_addr(const struct rxe_queue *q,
                                const void *addr)
 {
        return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
 
 static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
 {
-       return queue_empty(q, type) ? NULL : consumer_addr(q, type);
+       return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
 }
 
 #endif /* RXE_QUEUE_H */
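
The smp_load_acquire()/smp_store_release() pairs used throughout this
header implement a single-producer/single-consumer handoff: each side
reads its own index plainly and publishes it with a release store,
while the other side reads it with an acquire load. A minimal
user-space analogue using C11 atomics (illustrative names; build with
-pthread; a sketch of the memory-ordering pattern, not driver code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SLOTS 8			/* power of 2, as in rxe_queue */
#define MASK  (SLOTS - 1)

static int data[SLOTS];
static atomic_uint prod;	/* owned by the producer thread */
static atomic_uint cons;	/* owned by the consumer thread */

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100; i++) {
		unsigned int p = atomic_load_explicit(&prod,
						      memory_order_relaxed);

		/* spin while full; acquire pairs with the consumer's
		 * release below, like smp_load_acquire() above
		 */
		while (((p + 1 - atomic_load_explicit(&cons,
				memory_order_acquire)) & MASK) == 0)
			;
		data[p & MASK] = i;
		/* publish the element before the new index, like
		 * smp_store_release() above
		 */
		atomic_store_explicit(&prod, (p + 1) & MASK,
				      memory_order_release);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int sum = 0;

	pthread_create(&t, NULL, producer, NULL);
	for (int i = 0; i < 100; i++) {
		unsigned int c = atomic_load_explicit(&cons,
						      memory_order_relaxed);

		/* spin while empty */
		while (((atomic_load_explicit(&prod,
				memory_order_acquire) - c) & MASK) == 0)
			;
		sum += data[c & MASK];
		atomic_store_explicit(&cons, (c + 1) & MASK,
				      memory_order_release);
	}
	pthread_join(t, NULL);
	printf("sum = %d\n", sum);	/* 4950 = 0 + 1 + ... + 99 */
	return 0;
}

The release store makes the element contents visible before the
advanced index, so a reader that observes the new index with an
acquire load also observes the element it guards.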
 
        unsigned int cons;
        unsigned int prod;
 
-       if (qp->is_user) {
-               cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
-               prod = producer_index(q, QUEUE_TYPE_FROM_USER);
-       } else {
-               cons = consumer_index(q, QUEUE_TYPE_KERNEL);
-               prod = producer_index(q, QUEUE_TYPE_KERNEL);
-       }
+       cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+       prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 
        qp->req.wqe_index       = cons;
        qp->req.psn             = qp->comp.psn;
        qp->req.opcode          = -1;
 
        for (wqe_index = cons; wqe_index != prod;
-                       wqe_index = next_index(q, wqe_index)) {
-               wqe = addr_from_index(qp->sq.queue, wqe_index);
+                       wqe_index = queue_next_index(q, wqe_index)) {
+               wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
                mask = wr_opcode_mask(wqe->wr.opcode, qp);
 
                if (wqe->state == wqe_state_posted)
        unsigned int cons;
        unsigned int prod;
 
-       if (qp->is_user) {
-               wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
-               cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
-               prod = producer_index(q, QUEUE_TYPE_FROM_USER);
-       } else {
-               wqe = queue_head(q, QUEUE_TYPE_KERNEL);
-               cons = consumer_index(q, QUEUE_TYPE_KERNEL);
-               prod = producer_index(q, QUEUE_TYPE_KERNEL);
-       }
+       wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
+       cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+       prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 
        if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
                /* check to see if we are drained;
        if (index == prod)
                return NULL;
 
-       wqe = addr_from_index(q, index);
+       wqe = queue_addr_from_index(q, index);
 
        if (unlikely((qp->req.state == QP_STATE_DRAIN ||
                      qp->req.state == QP_STATE_DRAINED) &&
        qp->req.opcode = pkt->opcode;
 
        if (pkt->mask & RXE_END_MASK)
-               qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+               qp->req.wqe_index = queue_next_index(qp->sq.queue,
+                                                    qp->req.wqe_index);
 
        qp->need_req_skb = 0;
 
 
        wqe->state = wqe_state_done;
        wqe->status = IB_WC_SUCCESS;
-       qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+       qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 
        if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            qp->sq_sig_type == IB_SIGNAL_ALL_WR)
                goto exit;
 
        if (unlikely(qp->req.state == QP_STATE_RESET)) {
-               qp->req.wqe_index = consumer_index(q, q->type);
+               qp->req.wqe_index = queue_get_consumer(q,
+                                               QUEUE_TYPE_FROM_CLIENT);
                qp->req.opcode = -1;
                qp->req.need_rd_atomic = 0;
                qp->req.wait_psn = 0;
                        wqe->last_psn = qp->req.psn;
                        qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
                        qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
-                       qp->req.wqe_index = next_index(qp->sq.queue,
+                       qp->req.wqe_index = queue_next_index(qp->sq.queue,
                                                       qp->req.wqe_index);
                        wqe->state = wqe_state_done;
                        wqe->status = IB_WC_SUCCESS;
 
 
        spin_lock_bh(&srq->rq.consumer_lock);
 
-       if (qp->is_user)
-               wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
-       else
-               wqe = queue_head(q, QUEUE_TYPE_KERNEL);
+       wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
        if (!wqe) {
                spin_unlock_bh(&srq->rq.consumer_lock);
                return RESPST_ERR_RNR;
        memcpy(&qp->resp.srq_wqe, wqe, size);
 
        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
-       if (qp->is_user) {
-               advance_consumer(q, QUEUE_TYPE_FROM_USER);
-               count = queue_count(q, QUEUE_TYPE_FROM_USER);
-       } else {
-               advance_consumer(q, QUEUE_TYPE_KERNEL);
-               count = queue_count(q, QUEUE_TYPE_KERNEL);
-       }
+       queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+       count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);
 
        if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
                srq->limit = 0;
                        qp->resp.status = IB_WC_WR_FLUSH_ERR;
                        return RESPST_COMPLETE;
                } else if (!srq) {
-                       if (qp->is_user)
-                               qp->resp.wqe = queue_head(qp->rq.queue,
-                                               QUEUE_TYPE_FROM_USER);
-                       else
-                               qp->resp.wqe = queue_head(qp->rq.queue,
-                                               QUEUE_TYPE_KERNEL);
+                       qp->resp.wqe = queue_head(qp->rq.queue,
+                                       QUEUE_TYPE_FROM_CLIENT);
                        if (qp->resp.wqe) {
                                qp->resp.status = IB_WC_WR_FLUSH_ERR;
                                return RESPST_COMPLETE;
                if (srq)
                        return get_srq_wqe(qp);
 
-               if (qp->is_user)
-                       qp->resp.wqe = queue_head(qp->rq.queue,
-                                       QUEUE_TYPE_FROM_USER);
-               else
-                       qp->resp.wqe = queue_head(qp->rq.queue,
-                                       QUEUE_TYPE_KERNEL);
+               qp->resp.wqe = queue_head(qp->rq.queue,
+                               QUEUE_TYPE_FROM_CLIENT);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }
 
        }
 
        /* have copy for srq and reference for !srq */
-       if (!qp->srq) {
-               if (qp->is_user)
-                       advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_USER);
-               else
-                       advance_consumer(qp->rq.queue, QUEUE_TYPE_KERNEL);
-       }
+       if (!qp->srq)
+               queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
 
        qp->resp.wqe = NULL;
 
                return;
 
        while (!qp->srq && q && queue_head(q, q->type))
-               advance_consumer(q, q->type);
+               queue_advance_consumer(q, q->type);
 }
 
 int rxe_responder(void *arg)
 
        spin_lock_init(&srq->rq.producer_lock);
        spin_lock_init(&srq->rq.consumer_lock);
 
-       type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+       type = QUEUE_TYPE_FROM_CLIENT;
        q = rxe_queue_init(rxe, &srq->rq.max_wr,
                        srq_wqe_size, type);
        if (!q) {
 
        int num_sge = ibwr->num_sge;
        int full;
 
-       if (rq->is_user)
-               full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER);
-       else
-               full = queue_full(rq->queue, QUEUE_TYPE_KERNEL);
-
+       full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
        if (unlikely(full)) {
                err = -ENOMEM;
                goto err1;
        for (i = 0; i < num_sge; i++)
                length += ibwr->sg_list[i].length;
 
-       if (rq->is_user)
-               recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER);
-       else
-               recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL);
-
+       recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
        recv_wqe->wr_id = ibwr->wr_id;
        recv_wqe->num_sge = num_sge;
 
        recv_wqe->dma.cur_sge           = 0;
        recv_wqe->dma.sge_offset        = 0;
 
-       if (rq->is_user)
-               advance_producer(rq->queue, QUEUE_TYPE_FROM_USER);
-       else
-               advance_producer(rq->queue, QUEUE_TYPE_KERNEL);
+       queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
 
        return 0;
 
 
        spin_lock_irqsave(&qp->sq.sq_lock, flags);
 
-       if (qp->is_user)
-               full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER);
-       else
-               full = queue_full(sq->queue, QUEUE_TYPE_KERNEL);
+       full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
 
        if (unlikely(full)) {
                spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
                return -ENOMEM;
        }
 
-       if (qp->is_user)
-               send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER);
-       else
-               send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL);
-
+       send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
        init_send_wqe(qp, ibwr, mask, length, send_wqe);
 
-       if (qp->is_user)
-               advance_producer(sq->queue, QUEUE_TYPE_FROM_USER);
-       else
-               advance_producer(sq->queue, QUEUE_TYPE_KERNEL);
+       queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
 
        spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
 
 
        spin_lock_irqsave(&cq->cq_lock, flags);
        for (i = 0; i < num_entries; i++) {
-               if (cq->is_user)
-                       cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER);
-               else
-                       cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL);
+               cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
                if (!cqe)
                        break;
 
                memcpy(wc++, &cqe->ibwc, sizeof(*wc));
-               if (cq->is_user)
-                       advance_consumer(cq->queue, QUEUE_TYPE_TO_USER);
-               else
-                       advance_consumer(cq->queue, QUEUE_TYPE_KERNEL);
+               queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);
 
        struct rxe_cq *cq = to_rcq(ibcq);
        int count;
 
-       if (cq->is_user)
-               count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
-       else
-               count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
+       count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
 
        return (count > wc_cnt) ? wc_cnt : count;
 }
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = flags & IB_CQ_SOLICITED_MASK;
 
-       if (cq->is_user)
-               empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER);
-       else
-               empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL);
+       empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
 
        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
                ret = 1;