if (sdev->use_srq)
                send_queue_depth = sdev->srq_size;
        else
-               send_queue_depth = min(SRPT_RQ_SIZE,
+               send_queue_depth = min(MAX_SRPT_RQ_SIZE,
                                       sdev->device->attrs.max_qp_wr);
 
        memset(iocp, 0, sizeof(*iocp));
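
The else branch above caps the advertised queue depth at what the HCA
supports. A minimal standalone sketch of that clamp (function and
parameter names are hypothetical, not part of the driver; only
MAX_SRPT_RQ_SIZE comes from the patch):

	/* Hypothetical restatement of the clamp above: advertise the
	 * SRQ size when one is used, otherwise the smaller of the
	 * driver ring size and the HCA's per-QP work request limit. */
	static int example_send_queue_depth(bool use_srq, int srq_size,
					    int max_qp_wr)
	{
		if (use_srq)
			return srq_size;
		return min(MAX_SRPT_RQ_SIZE, max_qp_wr);
	}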
        struct srpt_port *sport = ch->sport;
        struct srpt_device *sdev = sport->sdev;
        const struct ib_device_attr *attrs = &sdev->device->attrs;
-       u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+       int sq_size = sport->port_attrib.srp_sq_size;
        int i, ret;
 
        WARN_ON(ch->rq_size < 1);
                goto out;
 
 retry:
-       ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
+       ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
                        0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
        if (IS_ERR(ch->cq)) {
                ret = PTR_ERR(ch->cq);
                pr_err("failed to create CQ cqe= %d ret= %d\n",
-                      ch->rq_size + srp_sq_size, ret);
+                      ch->rq_size + sq_size, ret);
                goto out;
        }
 
         * both, as RDMA contexts will also post completions for the
         * RDMA READ case.
         */
-       qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
-       qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+       qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+       qp_init->cap.max_rdma_ctxs = sq_size / 2;
        qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
        qp_init->port_num = ch->sport->port;
        if (sdev->use_srq) {
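
The halved values above follow the comment: sq_size is split evenly
between SEND work requests and RDMA R/W contexts, and the CQ created
earlier is sized ch->rq_size + sq_size because both halves post
completions. A sketch of that budget (helper name is hypothetical):

	/* Hypothetical helper mirroring the accounting above: receives,
	 * SENDs, and RDMA READ contexts may all generate completions,
	 * so the CQ must cover rq_size + sq_size entries. */
	static int example_cqe_budget(int rq_size, int sq_size)
	{
		int send_wr = sq_size / 2;	/* SEND completions */
		int rdma_ctxs = sq_size / 2;	/* R/W context completions */

		return rq_size + send_wr + rdma_ctxs;
	}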
        if (IS_ERR(ch->qp)) {
                ret = PTR_ERR(ch->qp);
                if (ret == -ENOMEM) {
-                       srp_sq_size /= 2;
-                       if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+                       sq_size /= 2;
+                       if (sq_size >= MIN_SRPT_SQ_SIZE) {
                                ib_destroy_cq(ch->cq);
                                goto retry;
                        }
 
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
                             ch->sport->sdev, ch->rq_size,
-                            ch->rsp_size, DMA_TO_DEVICE);
+                            ch->max_rsp_size, DMA_TO_DEVICE);
 
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
                             sdev, ch->rq_size,
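
For reference, the -ENOMEM path above implements a halving backoff:
recreate the CQ and retry QP allocation with half the send queue until
it would drop below MIN_SRPT_SQ_SIZE. A compact sketch of the same
loop, with a hypothetical allocator standing in for ib_create_qp():

	/* Hypothetical stand-in for a QP allocation that fails under
	 * memory pressure at large queue sizes. */
	static int example_try_alloc_qp(int sq_size)
	{
		return sq_size > 1024 ? -ENOMEM : 0;
	}

	/* Same shrink-and-retry shape as the goto-based loop above. */
	static int example_alloc_with_backoff(int sq_size)
	{
		while (sq_size >= MIN_SRPT_SQ_SIZE) {
			if (example_try_alloc_qp(sq_size) == 0)
				return 0;
			sq_size /= 2;
		}
		return -ENOMEM;
	}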
         * depth to avoid forcing the initiator driver to report QUEUE_FULL
         * to the SCSI mid-layer.
         */
-       ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+       ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
        spin_lock_init(&ch->spinlock);
        ch->state = CH_CONNECTING;
        INIT_LIST_HEAD(&ch->cmd_wait_list);
-       ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+       ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
        ch->ioctx_ring = (struct srpt_send_ioctx **)
                srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
                                      sizeof(*ch->ioctx_ring[0]),
-                                     ch->rsp_size, DMA_TO_DEVICE);
+                                     ch->max_rsp_size, DMA_TO_DEVICE);
        if (!ch->ioctx_ring)
                goto free_ch;
 
 free_ring:
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
                             ch->sport->sdev, ch->rq_size,
-                            ch->rsp_size, DMA_TO_DEVICE);
+                            ch->max_rsp_size, DMA_TO_DEVICE);
 free_ch:
        kfree(ch);
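
As the comment before the ch->rq_size assignment notes, the receive
ring must be at least as deep as the initiator's queue so the initiator
never has to report QUEUE_FULL. A hypothetical check expressing that
invariant (name and signature are illustrative only):

	/* Hypothetical invariant from the comment above: a receive ring
	 * shallower than the initiator queue depth risks QUEUE_FULL at
	 * the initiator's SCSI mid-layer. */
	static bool example_rq_size_sufficient(int rq_size, int initiator_depth)
	{
		return rq_size >= initiator_depth;
	}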