From: Amir Vadai
Date: Mon, 7 Mar 2011 11:11:59 +0000 (+0200)
Subject: sdp: make SDP_RX_SIZE a module parameter
X-Git-Tag: v4.1.12-92~264^2~5^2~26
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c238c76aa5279f02711b3b4de1036d99314eba33;p=users%2Fjedix%2Flinux-maple.git

sdp: make SDP_RX_SIZE a module parameter

Signed-off-by: Amir Vadai
---

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 40231a07a35b..02cf8f0e6f42 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -69,8 +69,8 @@
 #define SDP_FIN_WAIT_TIMEOUT (60 * HZ) /* like TCP_FIN_TIMEOUT */
 #define SDP_CMA_TIMEWAIT_TIMEOUT (150 * HZ)
+extern int sdp_rx_size;
 #define SDP_TX_SIZE 0x40
-#define SDP_RX_SIZE 0x40
 #define SDP_DEF_INLINE_THRESH 256
 #define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))

diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index 7ab188c6198e..ed08df5511e4 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -52,6 +52,9 @@
 #define SDP_MAJV_MINV 0x22
+SDP_MODPARAM_INT(sdp_rx_size, 0x40, "HW rx queue size (max num of credits)."
+		" Must be power of 2.");
+
 SDP_MODPARAM_SINT(sdp_retry_count, 5, "IB layer retry count");
 SDP_MODPARAM_SINT(sdp_link_layer_ib_only, 0, "Support only link layer of "
@@ -88,7 +91,7 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 	struct ib_qp_init_attr qp_init_attr = {
 		.event_handler = sdp_qp_event_handler,
 		.cap.max_send_wr = SDP_TX_SIZE,
-		.cap.max_recv_wr = SDP_RX_SIZE,
+		.cap.max_recv_wr = sdp_rx_size,
 		.cap.max_inline_data = sdp_inline_thresh,
 		.sq_sig_type = IB_SIGNAL_REQ_WR,
 		.qp_type = IB_QPT_RC,

diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c
index e6cd2293138b..4ecd0877259c 100644
--- a/drivers/infiniband/ulp/sdp/sdp_rx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_rx.c
@@ -181,7 +181,7 @@ static int sdp_post_recv(struct sdp_sock *ssk)
 	sdp_prf(sk_ssk(ssk), skb, "Posting skb");
 	h = (struct sdp_bsdh *)skb->head;
-	rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1));
+	rx_req = ssk->rx_ring.buffer + (id & (sdp_rx_size - 1));
 	rx_req->skb = skb;
 	for (i = 0; i < ssk->recv_frags; ++i) {
@@ -271,7 +271,7 @@ static inline int sdp_post_recvs_needed(struct sdp_sock *ssk)
 	if (unlikely(!ssk->qp_active))
 		return 0;
-	if (likely(posted >= SDP_RX_SIZE))
+	if (likely(posted >= sdp_rx_size))
 		return 0;
 	if (unlikely(posted < SDP_MIN_TX_CREDITS))
@@ -459,7 +459,7 @@ static struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id, int len
 	}
 	dev = ssk->ib_device;
-	rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
+	rx_req = &ssk->rx_ring.buffer[id & (sdp_rx_size - 1)];
 	skb = rx_req->skb;
 	sdp_reuse_sdp_buf(ssk, rx_req, len);
@@ -849,7 +849,7 @@ static void sdp_rx_ring_purge(struct sdp_sock *ssk)
 		__kfree_skb(skb);
 	}
-	for (id = 0; id < SDP_RX_SIZE; id++) {
+	for (id = 0; id < sdp_rx_size; id++) {
 		struct sdp_buf *sbuf = &ssk->rx_ring.buffer[id];
 		for (i = 1; i < SDP_MAX_SEND_SGES; i++) {
@@ -886,17 +886,17 @@ int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 	atomic_set(&ssk->rx_ring.tail, 1);
 	ssk->rx_ring.buffer = kzalloc(
-			sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE, GFP_KERNEL);
+			sizeof *ssk->rx_ring.buffer * sdp_rx_size, GFP_KERNEL);
 	if (!ssk->rx_ring.buffer) {
 		sdp_warn(sk_ssk(ssk), "Unable to allocate RX Ring size %zd.\n",
-			 sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE);
+			 sizeof(*ssk->rx_ring.buffer) * sdp_rx_size);
 		return -ENOMEM;
 	}
 	rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
-			  sk_ssk(ssk), SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);
+			  sk_ssk(ssk), sdp_rx_size, IB_CQ_VECTOR_LEAST_ATTACHED);
 	if (IS_ERR(rx_cq)) {
 		rc = PTR_ERR(rx_cq);
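
Editor's note (not part of the patch): the new sdp_rx_size parameter is declared through SDP_MODPARAM_INT, whose definition lives elsewhere in sdp.h and is not shown in this diff. A typical helper of this kind would pair the variable definition with module_param_named() and MODULE_PARM_DESC(); the sketch below is only an assumed expansion, not the driver's actual macro:

/*
 * Hypothetical sketch of what SDP_MODPARAM_INT might expand to; the real
 * definition is in sdp.h and is not part of this patch.
 */
#define SDP_MODPARAM_INT(name, def_val, msg)			\
	int name = def_val;					\
	module_param_named(name, name, int, 0644);		\
	MODULE_PARM_DESC(name, msg " [" #def_val "]")

Because the RX ring index is still computed by masking, id & (sdp_rx_size - 1), the value must be a power of two, exactly as the parameter description states; the parameter would presumably be supplied at module load time (for example, sdp_rx_size=128 on the module's modprobe options).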