From: Amir Vadai
Date: Mon, 8 Nov 2010 11:49:12 +0000 (+0200)
Subject: sdp: Send small sends using inline
X-Git-Tag: v4.1.12-92~264^2~5^2~64
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=38175508d177d3d4e78bcc4c3c99759fbc6c6f5d;p=users%2Fjedix%2Flinux-maple.git

sdp: Send small sends using inline

Signed-off-by: Amir Vadai
---

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 36c293741a7cf..c76bd8b0f2ce2 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -35,6 +35,7 @@
 
 #define SDP_TX_SIZE 0x40
 #define SDP_RX_SIZE 0x40
+#define SDP_DEF_INLINE_THRESH 256
 
 #define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
 
@@ -119,6 +120,7 @@ struct sdp_skb_cb {
 		sk_common_release(sk); \
 } while (0)
 
+extern int sdp_inline_thresh;
 extern int sdp_zcopy_thresh;
 extern struct workqueue_struct *sdp_wq;
 extern struct list_head sock_list;
@@ -449,6 +451,7 @@ struct sdp_sock {
 
 	/* ZCOPY data: -1:use global; 0:disable zcopy; >0: zcopy threshold */
 	int zcopy_thresh;
+	int inline_thresh;
 
 	int last_bind_err;
 };
@@ -742,6 +745,7 @@ static inline int credit_update_needed(struct sdp_sock *ssk)
 #define SDPSTATS_MAX_HIST_SIZE 256
 struct sdpstats {
 	u32 post_send[256];
+	u32 inline_sends;
 	u32 sendmsg_bcopy_segment;
 	u32 sendmsg_bzcopy_segment;
 	u32 sendmsg_zcopy_segment;
@@ -823,6 +827,9 @@ static inline void sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbu
 	skb = sbuf->skb;
 	sbuf->skb = NULL;
 
+	if (!sbuf->mapping[0])
+		return; /* Inlined send - nothing to cleanup */
+
 	ib_dma_unmap_single(dev, sbuf->mapping[0], head_size, dir);
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index c755ef14a94c6..1b7b18cafb348 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -79,6 +79,7 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 		.event_handler = sdp_qp_event_handler,
 		.cap.max_send_wr = SDP_TX_SIZE,
 		.cap.max_recv_wr = SDP_RX_SIZE,
+		.cap.max_inline_data = sdp_inline_thresh,
 		.sq_sig_type = IB_SIGNAL_REQ_WR,
 		.qp_type = IB_QPT_RC,
 	};
@@ -87,6 +88,8 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 
 	sdp_dbg(sk, "%s\n", __func__);
 
+	sdp_sk(sk)->inline_thresh = sdp_inline_thresh;
+
 	sdp_sk(sk)->max_sge = sdp_get_max_dev_sge(device);
 	sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge);
 
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index 5895ea4262e20..38362426991b2 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -93,6 +93,10 @@ SDP_MODPARAM_SINT(sdp_fmr_dirty_wm, 5, "Watermark to flush fmr pool");
 SDP_MODPARAM_SINT(recv_poll, 700, "usecs to poll recv before arming interrupt.");
 SDP_MODPARAM_SINT(sdp_keepalive_time, SDP_KEEPALIVE_TIME,
 	"Default idle time in seconds before keepalive probe sent.");
+
+SDP_MODPARAM_INT(sdp_inline_thresh, SDP_DEF_INLINE_THRESH,
+	"Inline copy threshold. Effective for new sockets only; 0=Off.");
+
 static int sdp_bzcopy_thresh = 0;
 SDP_MODPARAM_INT(sdp_zcopy_thresh, SDP_DEF_ZCOPY_THRESH ,
 	"Zero copy using RDMA threshold; 0=Off.");
diff --git a/drivers/infiniband/ulp/sdp/sdp_proc.c b/drivers/infiniband/ulp/sdp/sdp_proc.c
index 64c4933d34570..aa825dcf38232 100644
--- a/drivers/infiniband/ulp/sdp/sdp_proc.c
+++ b/drivers/infiniband/ulp/sdp/sdp_proc.c
@@ -316,6 +316,8 @@ static int sdpstats_seq_show(struct seq_file *seq, void *v)
 			SDPSTATS_COUNTER_GET(sendmsg));
 	seq_printf(seq, "bcopy segments \t\t: %d\n",
 		SDPSTATS_COUNTER_GET(sendmsg_bcopy_segment));
+	seq_printf(seq, "inline sends \t\t: %d\n",
+		SDPSTATS_COUNTER_GET(inline_sends));
 	seq_printf(seq, "bzcopy segments \t\t: %d\n",
 		SDPSTATS_COUNTER_GET(sendmsg_bzcopy_segment));
 	seq_printf(seq, "zcopy segments \t\t: %d\n",
diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c
index 92b22269c19af..08437398857e4 100644
--- a/drivers/infiniband/ulp/sdp/sdp_tx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_tx.c
@@ -77,6 +77,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
 	struct ib_sge ibsge[SDP_MAX_SEND_SGES];
 	struct ib_sge *sge = ibsge;
 	struct ib_send_wr tx_wr = { NULL };
+	u32 send_flags = IB_SEND_SIGNALED;
 
 	SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
 	SDPSTATS_HIST(send_size, skb->len);
@@ -117,28 +118,39 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
 	tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
 	tx_req->skb = skb;
 	dev = ssk->ib_device;
-	addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
-			DMA_TO_DEVICE);
-	tx_req->mapping[0] = addr;
-
-	/* TODO: proper error handling */
-	BUG_ON(ib_dma_mapping_error(dev, addr));
-
-	sge->addr = addr;
-	sge->length = skb->len - skb->data_len;
-	sge->lkey = ssk->sdp_dev->mr->lkey;
-	frags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < frags; ++i) {
-		++sge;
-		addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
-				skb_shinfo(skb)->frags[i].page_offset,
-				skb_shinfo(skb)->frags[i].size,
-				DMA_TO_DEVICE);
+
+	if (skb->len <= ssk->inline_thresh && !skb_shinfo(skb)->nr_frags) {
+		SDPSTATS_COUNTER_INC(inline_sends);
+		sge->addr = (u64) skb->data;
+		sge->length = skb->len;
+		sge->lkey = 0;
+		frags = 0;
+		tx_req->mapping[0] = 0; /* Nothing to be cleaned up by sdp_cleanup_sdp_buf() */
+		send_flags |= IB_SEND_INLINE;
+	} else {
+		addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
+				DMA_TO_DEVICE);
+		tx_req->mapping[0] = addr;
+
+		/* TODO: proper error handling */
 		BUG_ON(ib_dma_mapping_error(dev, addr));
-		tx_req->mapping[i + 1] = addr;
+
 		sge->addr = addr;
-		sge->length = skb_shinfo(skb)->frags[i].size;
+		sge->length = skb->len - skb->data_len;
 		sge->lkey = ssk->sdp_dev->mr->lkey;
+		frags = skb_shinfo(skb)->nr_frags;
+		for (i = 0; i < frags; ++i) {
+			++sge;
+			addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
+					skb_shinfo(skb)->frags[i].page_offset,
+					skb_shinfo(skb)->frags[i].size,
+					DMA_TO_DEVICE);
+			BUG_ON(ib_dma_mapping_error(dev, addr));
+			tx_req->mapping[i + 1] = addr;
+			sge->addr = addr;
+			sge->length = skb_shinfo(skb)->frags[i].size;
+			sge->lkey = ssk->sdp_dev->mr->lkey;
+		}
 	}
 
 	tx_wr.next = NULL;
@@ -146,7 +158,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
 	tx_wr.sg_list = ibsge;
 	tx_wr.num_sge = frags + 1;
 	tx_wr.opcode = IB_WR_SEND;
-	tx_wr.send_flags = IB_SEND_SIGNALED;
+	tx_wr.send_flags = send_flags;
 	if (unlikely(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
 		tx_wr.send_flags |= IB_SEND_SOLICITED;
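
As an illustration of the technique the patch applies, here is a minimal,
self-contained sketch in plain kernel C, under simplifying assumptions (a
linear skb with no page frags, a single SGE). post_skb(), cleanup_slot(),
struct tx_slot and the qp/mr/inline_thresh parameters are hypothetical
stand-ins for ssk->qp, ssk->sdp_dev->mr, struct sdp_buf and
ssk->inline_thresh, not the SDP driver's actual interfaces:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <rdma/ib_verbs.h>

struct tx_slot {
	struct sk_buff *skb;
	u64 mapping;			/* 0 marks an inlined send */
};

static int post_skb(struct ib_qp *qp, struct ib_mr *mr,
		    struct tx_slot *slot, struct sk_buff *skb,
		    int inline_thresh)
{
	struct ib_sge sge;
	struct ib_send_wr wr = { NULL }, *bad_wr;

	slot->skb = skb;

	if (skb->len <= inline_thresh && !skb_shinfo(skb)->nr_frags) {
		/*
		 * Inline path: the payload is copied into the send WQE
		 * at post time, so no DMA mapping and no lkey are needed.
		 */
		sge.addr = (u64) (unsigned long) skb->data;
		sge.length = skb->len;
		sge.lkey = 0;
		slot->mapping = 0;	/* tell cleanup there is nothing to unmap */
		wr.send_flags = IB_SEND_SIGNALED | IB_SEND_INLINE;
	} else {
		/* Regular path: DMA-map the linear data for the HCA. */
		u64 addr = ib_dma_map_single(qp->device, skb->data,
					     skb->len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(qp->device, addr))
			return -ENOMEM;
		sge.addr = addr;
		sge.length = skb->len;
		sge.lkey = mr->lkey;
		slot->mapping = addr;
		wr.send_flags = IB_SEND_SIGNALED;
	}

	wr.opcode = IB_WR_SEND;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	return ib_post_send(qp, &wr, &bad_wr);
}

/*
 * Completion-side cleanup, mirroring the sdp_cleanup_sdp_buf() change:
 * an inlined send left no mapping behind, so skip the unmap.
 */
static void cleanup_slot(struct ib_qp *qp, struct tx_slot *slot)
{
	if (slot->mapping)
		ib_dma_unmap_single(qp->device, slot->mapping,
				    slot->skb->len, DMA_TO_DEVICE);
	slot->skb = NULL;
}

With IB_SEND_INLINE the HCA driver copies the payload into the work queue
entry during ib_post_send(), so ownership of the buffer returns to the
caller immediately; that is what makes it safe for the patch to skip
ib_dma_unmap_single() in sdp_cleanup_sdp_buf() whenever mapping[0] is
zero. The inline length is bounded by the max_inline_data the QP was
created with, which is why sdp_init_qp() feeds sdp_inline_thresh into
ib_qp_init_attr.cap.max_inline_data before sdp_post_send() ever consults
the threshold.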