#define SDP_TX_SIZE 0x40
#define SDP_RX_SIZE 0x40
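+/* Default threshold (bytes) under which sends are copied inline into the
+ * send WQE (IB_SEND_INLINE) instead of being DMA mapped */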
+#define SDP_DEF_INLINE_THRESH 256
#define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
sk_common_release(sk); \
} while (0)
+extern int sdp_inline_thresh;
extern int sdp_zcopy_thresh;
extern struct workqueue_struct *sdp_wq;
extern struct list_head sock_list;
/* ZCOPY data: -1:use global; 0:disable zcopy; >0: zcopy threshold */
int zcopy_thresh;
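+	/* Inline copy: snapshot of sdp_inline_thresh at socket creation; 0:disable inline sends */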
+ int inline_thresh;
int last_bind_err;
};
#define SDPSTATS_MAX_HIST_SIZE 256
struct sdpstats {
u32 post_send[256];
+ u32 inline_sends;
u32 sendmsg_bcopy_segment;
u32 sendmsg_bzcopy_segment;
u32 sendmsg_zcopy_segment;
skb = sbuf->skb;
sbuf->skb = NULL;
+ if (!sbuf->mapping[0])
+		return; /* Inlined send - nothing to clean up */
+
ib_dma_unmap_single(dev, sbuf->mapping[0], head_size, dir);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
.event_handler = sdp_qp_event_handler,
.cap.max_send_wr = SDP_TX_SIZE,
.cap.max_recv_wr = SDP_RX_SIZE,
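+		/* Reserve WQE room so payloads up to the threshold can be sent with IB_SEND_INLINE */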
+ .cap.max_inline_data = sdp_inline_thresh,
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_RC,
};
sdp_dbg(sk, "%s\n", __func__);
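+	/* Snapshot the global threshold: later modparam changes affect only new sockets */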
+ sdp_sk(sk)->inline_thresh = sdp_inline_thresh;
+
sdp_sk(sk)->max_sge = sdp_get_max_dev_sge(device);
sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge);
SDP_MODPARAM_SINT(recv_poll, 700, "usecs to poll recv before arming interrupt.");
SDP_MODPARAM_SINT(sdp_keepalive_time, SDP_KEEPALIVE_TIME,
"Default idle time in seconds before keepalive probe sent.");
+
+SDP_MODPARAM_INT(sdp_inline_thresh, SDP_DEF_INLINE_THRESH,
+	"Inline copy threshold; effective for new sockets only; 0=Off.");
+
static int sdp_bzcopy_thresh = 0;
SDP_MODPARAM_INT(sdp_zcopy_thresh, SDP_DEF_ZCOPY_THRESH,
"Zero copy using RDMA threshold; 0=Off.");
SDPSTATS_COUNTER_GET(sendmsg));
seq_printf(seq, "bcopy segments \t\t: %d\n",
SDPSTATS_COUNTER_GET(sendmsg_bcopy_segment));
+ seq_printf(seq, "inline sends \t\t: %d\n",
+ SDPSTATS_COUNTER_GET(inline_sends));
seq_printf(seq, "bzcopy segments \t\t: %d\n",
SDPSTATS_COUNTER_GET(sendmsg_bzcopy_segment));
seq_printf(seq, "zcopy segments \t\t: %d\n",
struct ib_sge ibsge[SDP_MAX_SEND_SGES];
struct ib_sge *sge = ibsge;
struct ib_send_wr tx_wr = { NULL };
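+	/* Sends are signaled by default; IB_SEND_INLINE is OR'ed in below for small linear skbs */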
+ u32 send_flags = IB_SEND_SIGNALED;
SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
SDPSTATS_HIST(send_size, skb->len);
tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
tx_req->skb = skb;
dev = ssk->ib_device;
- addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
- DMA_TO_DEVICE);
- tx_req->mapping[0] = addr;
-
- /* TODO: proper error handling */
- BUG_ON(ib_dma_mapping_error(dev, addr));
-
- sge->addr = addr;
- sge->length = skb->len - skb->data_len;
- sge->lkey = ssk->sdp_dev->mr->lkey;
- frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frags; ++i) {
- ++sge;
- addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
+
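+	/* A linear skb no larger than the inline threshold is posted with
+	 * IB_SEND_INLINE: the driver copies the payload into the WQE at post
+	 * time, so no DMA mapping, no lkey and no unmap on completion are
+	 * needed. */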
+ if (skb->len <= ssk->inline_thresh && !skb_shinfo(skb)->nr_frags) {
+ SDPSTATS_COUNTER_INC(inline_sends);
+		sge->addr = (u64)(unsigned long)skb->data;
+ sge->length = skb->len;
+ sge->lkey = 0;
+ frags = 0;
+ tx_req->mapping[0] = 0; /* Nothing to be cleaned up by sdp_cleanup_sdp_buf() */
+ send_flags |= IB_SEND_INLINE;
+ } else {
+ addr = ib_dma_map_single(dev, skb->data, skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ tx_req->mapping[0] = addr;
+
+ /* TODO: proper error handling */
BUG_ON(ib_dma_mapping_error(dev, addr));
- tx_req->mapping[i + 1] = addr;
+
sge->addr = addr;
- sge->length = skb_shinfo(skb)->frags[i].size;
+ sge->length = skb->len - skb->data_len;
sge->lkey = ssk->sdp_dev->mr->lkey;
+ frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < frags; ++i) {
+ ++sge;
+ addr = ib_dma_map_page(dev, skb_shinfo(skb)->frags[i].page,
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_shinfo(skb)->frags[i].size,
+ DMA_TO_DEVICE);
+ BUG_ON(ib_dma_mapping_error(dev, addr));
+ tx_req->mapping[i + 1] = addr;
+ sge->addr = addr;
+ sge->length = skb_shinfo(skb)->frags[i].size;
+ sge->lkey = ssk->sdp_dev->mr->lkey;
+ }
}
tx_wr.next = NULL;
tx_wr.sg_list = ibsge;
tx_wr.num_sge = frags + 1;
tx_wr.opcode = IB_WR_SEND;
- tx_wr.send_flags = IB_SEND_SIGNALED;
+ tx_wr.send_flags = send_flags;
if (unlikely(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
tx_wr.send_flags |= IB_SEND_SOLICITED;
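
Note (not part of the patch): ib_create_qp() can fail when the requested
cap.max_inline_data exceeds what the HCA supports, in which case socket
creation would fail outright. A hedged fallback sketch follows; the helper
name sdp_create_qp_inline and the retry policy are illustrative assumptions,
not code from this patch.

static struct ib_qp *sdp_create_qp_inline(struct ib_pd *pd,
					  struct ib_qp_init_attr *attr,
					  int *inline_thresh)
{
	struct ib_qp *qp = ib_create_qp(pd, attr);

	if (IS_ERR(qp) && attr->cap.max_inline_data) {
		/* Retry with inline disabled; sends for this socket fall
		 * back to the DMA-mapped path. */
		attr->cap.max_inline_data = 0;
		*inline_thresh = 0;
		qp = ib_create_qp(pd, attr);
	}
	return qp;
}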