sdp: Send small sends using inline
author    Amir Vadai <amirv@mellanox.co.il>
          Mon, 8 Nov 2010 11:49:12 +0000 (13:49 +0200)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
          Tue, 6 Oct 2015 12:05:34 +0000 (05:05 -0700)
Signed-off-by: Amir Vadai <amirv@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_main.c
drivers/infiniband/ulp/sdp/sdp_proc.c
drivers/infiniband/ulp/sdp/sdp_tx.c

drivers/infiniband/ulp/sdp/sdp.h
index 36c293741a7cfa1a3ce359226af0e557ac244210..c76bd8b0f2ce2fab2a63eefc55e9b7cd6ea73edf 100644 (file)
@@ -35,6 +35,7 @@
 
 #define SDP_TX_SIZE 0x40
 #define SDP_RX_SIZE 0x40
+#define SDP_DEF_INLINE_THRESH 256
 
 #define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
 
@@ -119,6 +120,7 @@ struct sdp_skb_cb {
                sk_common_release(sk); \
 } while (0)
 
+extern int sdp_inline_thresh;
 extern int sdp_zcopy_thresh;
 extern struct workqueue_struct *sdp_wq;
 extern struct list_head sock_list;
@@ -449,6 +451,7 @@ struct sdp_sock {
 
        /* ZCOPY data: -1:use global; 0:disable zcopy; >0: zcopy threshold */
        int zcopy_thresh;
+       int inline_thresh;
 
        int last_bind_err;
 };
@@ -742,6 +745,7 @@ static inline int credit_update_needed(struct sdp_sock *ssk)
 #define SDPSTATS_MAX_HIST_SIZE 256
 struct sdpstats {
        u32 post_send[256];
+       u32 inline_sends;
        u32 sendmsg_bcopy_segment;
        u32 sendmsg_bzcopy_segment;
        u32 sendmsg_zcopy_segment;
@@ -823,6 +827,9 @@ static inline void sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbu
        skb = sbuf->skb;
        sbuf->skb = NULL;
 
+       if (!sbuf->mapping[0])
+               return; /* Inlined send - nothing to cleanup */
+
        ib_dma_unmap_single(dev, sbuf->mapping[0], head_size, dir);
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
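
The guard added above is one half of a convention: the posting side (see the sdp_tx.c hunk later in this commit) stores 0 in mapping[0] when the payload was inlined into the work request, so the completion path knows nothing was DMA-mapped. Below is a minimal, self-contained sketch of a cleanup helper honoring that convention; the function name, the DMA_TO_DEVICE direction and the omission of skb freeing are illustrative assumptions, not the driver's exact code.

/* Illustrative sketch of a TX-buffer cleanup that honors the
 * "mapping[0] == 0 means inlined" convention. Name and details are
 * assumptions, not the actual SDP code.
 */
static void example_cleanup_tx_buf(struct ib_device *dev,
                                   struct sdp_buf *sbuf, size_t head_size)
{
        struct sk_buff *skb = sbuf->skb;
        int i;

        sbuf->skb = NULL;

        if (!sbuf->mapping[0])
                return; /* Inlined send: nothing was DMA-mapped. */

        /* DMA-mapped send: unmap the linear head and every page fragment. */
        ib_dma_unmap_single(dev, sbuf->mapping[0], head_size, DMA_TO_DEVICE);
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                ib_dma_unmap_page(dev, sbuf->mapping[i + 1],
                                  skb_shinfo(skb)->frags[i].size,
                                  DMA_TO_DEVICE);
}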
drivers/infiniband/ulp/sdp/sdp_cma.c
index c755ef14a94c6af5b0a4d9b56d57e2afa343ae7a..1b7b18cafb348acf5a2f10336d32bebc05ecf9e0 100644 (file)
@@ -79,6 +79,7 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
                .event_handler = sdp_qp_event_handler,
                .cap.max_send_wr = SDP_TX_SIZE,
                .cap.max_recv_wr = SDP_RX_SIZE,
+               .cap.max_inline_data = sdp_inline_thresh,
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                .qp_type = IB_QPT_RC,
        };
@@ -87,6 +88,8 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 
        sdp_dbg(sk, "%s\n", __func__);
 
+       sdp_sk(sk)->inline_thresh = sdp_inline_thresh;
+
        sdp_sk(sk)->max_sge = sdp_get_max_dev_sge(device);
        sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge);
 
drivers/infiniband/ulp/sdp/sdp_main.c
index 5895ea4262e207d5e99435b23189a874b22fb9af..38362426991b27e36f777b416e4bc3dee388bb40 100644 (file)
@@ -93,6 +93,10 @@ SDP_MODPARAM_SINT(sdp_fmr_dirty_wm, 5, "Watermark to flush fmr pool");
 SDP_MODPARAM_SINT(recv_poll, 700, "usecs to poll recv before arming interrupt.");
 SDP_MODPARAM_SINT(sdp_keepalive_time, SDP_KEEPALIVE_TIME,
        "Default idle time in seconds before keepalive probe sent.");
+
+SDP_MODPARAM_INT(sdp_inline_thresh, SDP_DEF_INLINE_THRESH,
+       "Inline copy threshold. effective to new sockets only; 0=Off.");
+
 static int sdp_bzcopy_thresh = 0;
 SDP_MODPARAM_INT(sdp_zcopy_thresh, SDP_DEF_ZCOPY_THRESH ,
        "Zero copy using RDMA threshold; 0=Off.");
drivers/infiniband/ulp/sdp/sdp_proc.c
index 64c4933d34570998788641bf4bf9d8ec464f096c..aa825dcf3823281c84e564d2f4c6054834a93c74 100644 (file)
@@ -316,6 +316,8 @@ static int sdpstats_seq_show(struct seq_file *seq, void *v)
                SDPSTATS_COUNTER_GET(sendmsg));
        seq_printf(seq, "bcopy segments     \t\t: %d\n",
                SDPSTATS_COUNTER_GET(sendmsg_bcopy_segment));
+       seq_printf(seq, "inline sends       \t\t: %d\n",
+               SDPSTATS_COUNTER_GET(inline_sends));
        seq_printf(seq, "bzcopy segments    \t\t: %d\n",
                SDPSTATS_COUNTER_GET(sendmsg_bzcopy_segment));
        seq_printf(seq, "zcopy segments    \t\t: %d\n",
drivers/infiniband/ulp/sdp/sdp_tx.c
index 92b22269c19af4088c975a3ba5e5ce6fb59119a6..08437398857e45fdcde2396e1567b0950df276d9 100644 (file)
@@ -77,6 +77,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
        struct ib_sge ibsge[SDP_MAX_SEND_SGES];
        struct ib_sge *sge = ibsge;
        struct ib_send_wr tx_wr = { NULL };
+       u32 send_flags = IB_SEND_SIGNALED;
 
        SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
        SDPSTATS_HIST(send_size, skb->len);
@@ -117,28 +118,39 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
        tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
        tx_req->skb = skb;
        dev = ssk->ib_device;
-       addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
-                                DMA_TO_DEVICE);
-       tx_req->mapping[0] = addr;
-
-       /* TODO: proper error handling */
-       BUG_ON(ib_dma_mapping_error(dev, addr));
-
-       sge->addr = addr;
-       sge->length = skb->len - skb->data_len;
-       sge->lkey = ssk->sdp_dev->mr->lkey;
-       frags = skb_shinfo(skb)->nr_frags;
-       for (i = 0; i < frags; ++i) {
-               ++sge;
-               addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
-                                      skb_shinfo(skb)->frags[i].page_offset,
-                                      skb_shinfo(skb)->frags[i].size,
-                                      DMA_TO_DEVICE);
+
+       if (skb->len <= ssk->inline_thresh && !skb_shinfo(skb)->nr_frags) {
+               SDPSTATS_COUNTER_INC(inline_sends);
+               sge->addr = (u64) skb->data;
+               sge->length = skb->len;
+               sge->lkey = 0;
+               frags = 0;
+               tx_req->mapping[0] = 0; /* Nothing to be cleaned up by sdp_cleanup_sdp_buf() */
+               send_flags |= IB_SEND_INLINE;
+       } else {
+               addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
+                               DMA_TO_DEVICE);
+               tx_req->mapping[0] = addr;
+
+               /* TODO: proper error handling */
                BUG_ON(ib_dma_mapping_error(dev, addr));
-               tx_req->mapping[i + 1] = addr;
+
                sge->addr = addr;
-               sge->length = skb_shinfo(skb)->frags[i].size;
+               sge->length = skb->len - skb->data_len;
                sge->lkey = ssk->sdp_dev->mr->lkey;
+               frags = skb_shinfo(skb)->nr_frags;
+               for (i = 0; i < frags; ++i) {
+                       ++sge;
+                       addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
+                                       skb_shinfo(skb)->frags[i].page_offset,
+                                       skb_shinfo(skb)->frags[i].size,
+                                       DMA_TO_DEVICE);
+                       BUG_ON(ib_dma_mapping_error(dev, addr));
+                       tx_req->mapping[i + 1] = addr;
+                       sge->addr = addr;
+                       sge->length = skb_shinfo(skb)->frags[i].size;
+                       sge->lkey = ssk->sdp_dev->mr->lkey;
+               }
        }
 
        tx_wr.next = NULL;
@@ -146,7 +158,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
        tx_wr.sg_list = ibsge;
        tx_wr.num_sge = frags + 1;
        tx_wr.opcode = IB_WR_SEND;
-       tx_wr.send_flags = IB_SEND_SIGNALED;
+       tx_wr.send_flags = send_flags;
        if (unlikely(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
                tx_wr.send_flags |= IB_SEND_SOLICITED;
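
The core of the change is the IB_SEND_INLINE path above: for a small, linear skb the HCA driver copies the payload into the work request at post time, so no DMA mapping and no lkey are needed, and the buffer may be reused as soon as ib_post_send() returns. Here is a compact sketch of that decision for the linear head only; the helper name is illustrative, the fragment loop is omitted for brevity, and error handling replaces the BUG_ON() used by the patch.

/* Sketch (assumptions marked): choosing between an inline send and a
 * DMA-mapped send. The real logic lives in sdp_post_send().
 */
static int example_fill_sge(struct sdp_sock *ssk, struct sdp_buf *tx_req,
                            struct sk_buff *skb, struct ib_sge *sge,
                            u32 *send_flags)
{
        *send_flags = IB_SEND_SIGNALED;

        if (skb->len <= ssk->inline_thresh && !skb_shinfo(skb)->nr_frags) {
                /* Small linear payload: the provider copies it into the
                 * WQE at post time, so no mapping and no lkey are needed.
                 */
                sge->addr = (u64) (unsigned long) skb->data;
                sge->length = skb->len;
                sge->lkey = 0;
                tx_req->mapping[0] = 0; /* sentinel: nothing to unmap */
                *send_flags |= IB_SEND_INLINE;
                return 0;
        }

        /* Otherwise fall back to the DMA-mapped path (head only shown). */
        tx_req->mapping[0] = ib_dma_map_single(ssk->ib_device, skb->data,
                                               skb->len - skb->data_len,
                                               DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ssk->ib_device, tx_req->mapping[0]))
                return -ENOMEM;

        sge->addr = tx_req->mapping[0];
        sge->length = skb->len - skb->data_len;
        sge->lkey = ssk->sdp_dev->mr->lkey;
        return 0;
}

The immediate-reuse property is also why the completion side can skip unmapping entirely when mapping[0] is 0: an inline send never touched the DMA API, so the only remaining work at completion time is releasing the skb.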