From: Amir Vadai Date: Thu, 22 Apr 2010 08:26:57 +0000 (+0300) Subject: sdp: use max number of SGE from HW capabilities X-Git-Tag: v4.1.12-92~264^2~5^2~192 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=717b55f94d8dc05d1a314739b3b72d58497451e8;p=users%2Fjedix%2Flinux-maple.git sdp: use max number of SGE from HW capabilities Instead of using a hard-coded max number of SGEs, take it from the device capabilities. Signed-off-by: Amir Vadai --- diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h index 2d66c36c72384..0e66baeb62010 100644 --- a/drivers/infiniband/ulp/sdp/sdp.h +++ b/drivers/infiniband/ulp/sdp/sdp.h @@ -36,8 +36,8 @@ #define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2)) -#define SDP_MAX_RECV_SKB_FRAGS (PAGE_SIZE > 0x8000 ? 1 : 0x8000 / PAGE_SIZE) -#define SDP_MAX_SEND_SKB_FRAGS (SDP_MAX_RECV_SKB_FRAGS + 1) +#define SDP_MAX_RECV_SGES 17 +#define SDP_MAX_SEND_SGES 17 /* skb inlined data len - rest will be rx'ed into frags */ #define SDP_SKB_HEAD_SIZE (0x500 + sizeof(struct sdp_bsdh)) @@ -197,7 +197,7 @@ struct sdp_srcah { struct sdp_buf { struct sk_buff *skb; - u64 mapping[SDP_MAX_SEND_SKB_FRAGS + 1]; + u64 mapping[SDP_MAX_SEND_SGES]; } __attribute__((__packed__)); struct sdp_chrecvbuf { @@ -332,7 +332,6 @@ struct sdp_sock { struct tx_srcavail_state *tx_sa; struct rx_srcavail_state *rx_sa; spinlock_t tx_sa_lock; - int max_send_sge; struct delayed_work srcavail_cancel_work; int srcavail_cancel_mseq; diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c index f35b3c931e704..33c90cf1242cc 100644 --- a/drivers/infiniband/ulp/sdp/sdp_cma.c +++ b/drivers/infiniband/ulp/sdp/sdp_cma.c @@ -82,7 +82,6 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id) .event_handler = sdp_qp_event_handler, .cap.max_send_wr = SDP_TX_SIZE, .cap.max_recv_wr = SDP_RX_SIZE, - .cap.max_recv_sge = SDP_MAX_RECV_SKB_FRAGS + 1, .sq_sig_type = IB_SIGNAL_REQ_WR, .qp_type = IB_QPT_RC, }; @@ -92,15 
+91,13 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id) sdp_dbg(sk, "%s\n", __func__); sdp_sk(sk)->max_sge = sdp_get_max_send_sge(device); - if (sdp_sk(sk)->max_sge < (SDP_MAX_RECV_SKB_FRAGS + 1)) { - sdp_warn(sk, "recv sge's. capability: %d needed: %ld\n", - sdp_sk(sk)->max_sge, SDP_MAX_RECV_SKB_FRAGS + 1); - rc = -ENOMEM; - goto err_rx; - } + sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge); - qp_init_attr.cap.max_send_sge = sdp_sk(sk)->max_sge; - sdp_dbg(sk, "Setting max send sge to: %d\n", sdp_sk(sk)->max_sge); + qp_init_attr.cap.max_send_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_SEND_SGES); + sdp_dbg(sk, "Setting max send sge to: %d\n", qp_init_attr.cap.max_send_sge); + + qp_init_attr.cap.max_recv_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_RECV_SGES); + sdp_dbg(sk, "Setting max recv sge to: %d\n", qp_init_attr.cap.max_recv_sge); sdp_sk(sk)->sdp_dev = ib_get_client_data(device, &sdp_client); if (!sdp_sk(sk)->sdp_dev) { @@ -143,6 +140,14 @@ err_rx: return rc; } +static int sdp_get_max_send_frags(u32 buf_size) +{ + return MIN( + /* +1 to compensate on not aligned buffers */ + (PAGE_ALIGN(buf_size) >> PAGE_SHIFT) + 1, + SDP_MAX_SEND_SGES - 1); +} + static int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id, struct rdma_cm_event *event) { @@ -187,8 +192,8 @@ static int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id, sdp_sk(child)->min_bufs = tx_credits(sdp_sk(child)) / 4; sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) - sizeof(struct sdp_bsdh); - sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) / - PAGE_SIZE + 1; /* +1 to conpensate on not aligned buffers */ + + sdp_sk(child)->send_frags = sdp_get_max_send_frags(sdp_sk(child)->xmit_size_goal); sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size); id->context = child; @@ -230,9 +235,7 @@ static int sdp_response_handler(struct sock *sk, struct rdma_cm_id *id, sdp_sk(sk)->min_bufs = tx_credits(sdp_sk(sk)) / 4; sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) 
- sizeof(struct sdp_bsdh); - sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) / - PAGE_SIZE, MAX_SKB_FRAGS) + 1; /* +1 to conpensate on not */ - /* aligned buffers */ + sdp_sk(sk)->send_frags = sdp_get_max_send_frags(sdp_sk(sk)->xmit_size_goal); sdp_sk(sk)->xmit_size_goal = MIN(sdp_sk(sk)->xmit_size_goal, sdp_sk(sk)->send_frags * PAGE_SIZE); diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c index d4ac4aff364d5..1676a425dbd63 100644 --- a/drivers/infiniband/ulp/sdp/sdp_rx.c +++ b/drivers/infiniband/ulp/sdp/sdp_rx.c @@ -149,7 +149,7 @@ static int sdp_post_recv(struct sdp_sock *ssk) u64 addr; struct ib_device *dev; struct ib_recv_wr rx_wr = { NULL }; - struct ib_sge ibsge[SDP_MAX_RECV_SKB_FRAGS + 1]; + struct ib_sge ibsge[SDP_MAX_RECV_SGES]; struct ib_sge *sge = ibsge; struct ib_recv_wr *bad_wr; struct sk_buff *skb; @@ -339,11 +339,19 @@ static inline struct sk_buff *sdp_sock_queue_rcv_skb(struct sock *sk, return skb; } +static int sdp_get_recv_sges(struct sdp_sock *ssk, u32 new_size) +{ + int recv_sges = ssk->max_sge - 1; /* 1 sge is dedicated to sdp header */ + + recv_sges = MIN(recv_sges, PAGE_ALIGN(new_size) >> PAGE_SHIFT); + recv_sges = MIN(recv_sges, SDP_MAX_RECV_SGES - 1); + + return recv_sges; +} + int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size) { - ssk->recv_frags = PAGE_ALIGN(new_size - SDP_SKB_HEAD_SIZE) / PAGE_SIZE; - if (ssk->recv_frags > SDP_MAX_RECV_SKB_FRAGS) - ssk->recv_frags = SDP_MAX_RECV_SKB_FRAGS; + ssk->recv_frags = sdp_get_recv_sges(ssk, new_size); ssk->rcvbuf_scale = rcvbuf_scale; sdp_post_recvs(ssk); @@ -353,16 +361,13 @@ int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size) int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size) { - u32 curr_size = SDP_SKB_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE; - u32 max_size = SDP_SKB_HEAD_SIZE + SDP_MAX_RECV_SKB_FRAGS * PAGE_SIZE; + u32 curr_size = ssk->recv_frags << PAGE_SHIFT; + u32 max_size = (ssk->max_sge - 1) << PAGE_SHIFT; if 
(new_size > curr_size && new_size <= max_size && sdp_get_large_socket(ssk)) { ssk->rcvbuf_scale = rcvbuf_scale; - ssk->recv_frags = PAGE_ALIGN(new_size - SDP_SKB_HEAD_SIZE) / - PAGE_SIZE; - if (ssk->recv_frags > SDP_MAX_RECV_SKB_FRAGS) - ssk->recv_frags = SDP_MAX_RECV_SKB_FRAGS; + ssk->recv_frags = sdp_get_recv_sges(ssk, new_size); return 0; } else return -1; diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c index 7d476845cedea..05632d82421c1 100644 --- a/drivers/infiniband/ulp/sdp/sdp_tx.c +++ b/drivers/infiniband/ulp/sdp/sdp_tx.c @@ -72,7 +72,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb) struct ib_device *dev; struct ib_send_wr *bad_wr; - struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1]; + struct ib_sge ibsge[SDP_MAX_SEND_SGES]; struct ib_sge *sge = ibsge; struct ib_send_wr tx_wr = { NULL };