#define SDP_MAJV_MINV 0x22
+SDP_MODPARAM_INT(sdp_rx_size, 0x40, "HW rx queue size (max number of credits)."
+ " Must be a power of 2.");
+
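Every ring index below is reduced with id & (sdp_rx_size - 1), so the new parameter only behaves correctly when it really is a power of two, as its description string says. A minimal sketch of enforcing that at module load, assuming SDP_MODPARAM_INT declares sdp_rx_size as a plain int and assuming a hook in the driver's init path (the helper name sdp_validate_rx_size is made up for illustration):

#include <linux/kernel.h>
#include <linux/log2.h>

/* Hypothetical helper: fall back to the next power of two when the
 * loaded value cannot be used as an index mask.
 */
static void sdp_validate_rx_size(void)
{
	if (!is_power_of_2(sdp_rx_size)) {
		int fixed = roundup_pow_of_two(sdp_rx_size);

		pr_warn("sdp_rx_size=%d is not a power of 2, using %d\n",
			sdp_rx_size, fixed);
		sdp_rx_size = fixed;
	}
}

With the parameter in place the ring size is chosen at load time, e.g. modprobe ib_sdp sdp_rx_size=128 (assuming the module is built as ib_sdp), and, if the parameter is exported with read permission, it can be read back from /sys/module/ib_sdp/parameters/sdp_rx_size.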
SDP_MODPARAM_SINT(sdp_retry_count, 5, "IB layer retry count");
SDP_MODPARAM_SINT(sdp_link_layer_ib_only, 0, "Support only link layer of "
struct ib_qp_init_attr qp_init_attr = {
.event_handler = sdp_qp_event_handler,
.cap.max_send_wr = SDP_TX_SIZE,
- .cap.max_recv_wr = SDP_RX_SIZE,
+ .cap.max_recv_wr = sdp_rx_size,
.cap.max_inline_data = sdp_inline_thresh,
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_RC,
sdp_prf(sk_ssk(ssk), skb, "Posting skb");
h = (struct sdp_bsdh *)skb->head;
- rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1));
+ rx_req = ssk->rx_ring.buffer + (id & (sdp_rx_size - 1));
rx_req->skb = skb;
for (i = 0; i < ssk->recv_frags; ++i) {
if (unlikely(!ssk->qp_active))
return 0;
- if (likely(posted >= SDP_RX_SIZE))
+ if (likely(posted >= sdp_rx_size))
return 0;
if (unlikely(posted < SDP_MIN_TX_CREDITS))
}
dev = ssk->ib_device;
- rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
+ rx_req = &ssk->rx_ring.buffer[id & (sdp_rx_size - 1)];
skb = rx_req->skb;
sdp_reuse_sdp_buf(ssk, rx_req, len);
__kfree_skb(skb);
}
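The two lookups above use the standard power-of-two ring trick: the ring id is mapped into the buffer with id & (sdp_rx_size - 1), which equals id % sdp_rx_size only when the size is a power of two. A standalone illustration (plain userspace C, not driver code):

#include <assert.h>

/* Mask-based slot lookup; valid only for power-of-two ring sizes. */
static unsigned int ring_slot(unsigned int id, unsigned int size)
{
	return id & (size - 1);
}

int main(void)
{
	assert(ring_slot(70, 64) == 70 % 64); /* slot 6 */
	assert(ring_slot(64, 64) == 0);       /* wraps back to slot 0 */
	assert(ring_slot(70, 48) != 70 % 48); /* 48 is not a power of 2: mask gives 6, modulo gives 22 */
	return 0;
}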
- for (id = 0; id < SDP_RX_SIZE; id++) {
+ for (id = 0; id < sdp_rx_size; id++) {
struct sdp_buf *sbuf = &ssk->rx_ring.buffer[id];
for (i = 1; i < SDP_MAX_SEND_SGES; i++) {
atomic_set(&ssk->rx_ring.tail, 1);
ssk->rx_ring.buffer = kzalloc(
- sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE, GFP_KERNEL);
+ sizeof *ssk->rx_ring.buffer * sdp_rx_size, GFP_KERNEL);
if (!ssk->rx_ring.buffer) {
sdp_warn(sk_ssk(ssk),
"Unable to allocate RX Ring size %zd.\n",
- sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE);
+ sizeof(*ssk->rx_ring.buffer) * sdp_rx_size);
return -ENOMEM;
}
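Since the multiplier is now a runtime module parameter rather than a compile-time constant, the overflow-checked kcalloc() is arguably the more defensive idiom for this allocation. A possible alternative form, shown only as a sketch of that option and not something this patch does (behaviour is the same for sane values, and kcalloc zeroes the buffer just as kzalloc does):

	/* Overflow-checked, zeroing allocation of sdp_rx_size ring slots. */
	ssk->rx_ring.buffer = kcalloc(sdp_rx_size,
				      sizeof(*ssk->rx_ring.buffer),
				      GFP_KERNEL);
	if (!ssk->rx_ring.buffer)
		return -ENOMEM;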
rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
- sk_ssk(ssk), SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);
+ sk_ssk(ssk), sdp_rx_size, IB_CQ_VECTOR_LEAST_ATTACHED);
if (IS_ERR(rx_cq)) {
rc = PTR_ERR(rx_cq);