#define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))
-#define SDP_MAX_RECV_SGES 17
-#define SDP_MAX_SEND_SGES 17
+#define SDP_MAX_RECV_SGES 9 /* 1 for SDP header + 8 for payload */
+#define SDP_MAX_SEND_SGES 9 /* 1 for SDP header + 8 for payload */
/* skb inlined data len - rest will be rx'ed into frags */
#define SDP_SKB_HEAD_SIZE (0x500 + sizeof(struct sdp_bsdh))
#define SDPSTATS_HIST(stat, size)
#endif
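+/*
+ * Unmap the DMA addresses of a posted rx/tx buffer: mapping[0] covers
+ * the skb head (head_size bytes), mapping[1..nr_frags] cover the paged
+ * fragments.
+ */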
+static inline void sdp_cleanup_sdp_buf(struct sdp_sock *ssk,
+		struct sdp_buf *sbuf, size_t head_size,
+		enum dma_data_direction dir)
+{
+	int i;
+	struct sk_buff *skb;
+	struct ib_device *dev = ssk->ib_device;
+
+	skb = sbuf->skb;
+
+	ib_dma_unmap_single(dev, sbuf->mapping[0], head_size, dir);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		ib_dma_unmap_page(dev, sbuf->mapping[i + 1],
+				  skb_shinfo(skb)->frags[i].size, dir);
+	}
+}
+
/* sdp_main.c */
void sdp_set_default_moderation(struct sdp_sock *ssk);
int sdp_init_sock(struct sock *sk);
{
}
-static int sdp_get_max_send_sge(struct ib_device *dev)
+static int sdp_get_max_dev_sge(struct ib_device *dev)
{
	struct ib_device_attr attr;
	static int max_sges = -1;
	sdp_dbg(sk, "%s\n", __func__);
-	sdp_sk(sk)->max_sge = sdp_get_max_send_sge(device);
+	sdp_sk(sk)->max_sge = sdp_get_max_dev_sge(device);
	sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge);
	qp_init_attr.cap.max_send_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_SEND_SGES);
	if (unlikely(rc)) {
		sdp_warn(&ssk->isk.sk, "ib_post_recv failed. status %d\n", rc);
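+		/* Unmap and free the skb that could not be posted */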
+		sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE,
+				DMA_FROM_DEVICE);
+		__kfree_skb(skb);
+
		sdp_reset(&ssk->isk.sk);
		return -1;
	struct sdp_buf *rx_req;
	struct ib_device *dev;
	struct sk_buff *skb;
-	int i, frags;
	if (unlikely(id != ring_tail(ssk->rx_ring))) {
		printk(KERN_WARNING "Bogus recv completion id %d tail %d\n",
	dev = ssk->ib_device;
	rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
	skb = rx_req->skb;
-	ib_dma_unmap_single(dev, rx_req->mapping[0], SDP_SKB_HEAD_SIZE,
-			    DMA_FROM_DEVICE);
-	frags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < frags; ++i)
-		ib_dma_unmap_page(dev, rx_req->mapping[i + 1],
-				  skb_shinfo(skb)->frags[i].size,
-				  DMA_FROM_DEVICE);
+	sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE, DMA_FROM_DEVICE);
+
	atomic_inc(&ssk->rx_ring.tail);
	atomic_dec(&ssk->remote_credits);
	return skb;
	tx_wr.send_flags |= IB_SEND_SOLICITED;
	rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
-	atomic_inc(&ssk->tx_ring.head);
-	atomic_dec(&ssk->tx_ring.credits);
-	atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));
	if (unlikely(rc)) {
		sdp_dbg(&ssk->isk.sk,
				"ib_post_send failed with status %d.\n", rc);
+
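+		/* Undo the DMA mapping of the buffer we failed to post */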
+		sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len,
+				DMA_TO_DEVICE);
+
		sdp_set_error(&ssk->isk.sk, -ECONNRESET);
		wake_up(&ssk->wq);
+
+		goto err;
	}
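+	/* Account for the send only after ib_post_send() succeeded */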
+	atomic_inc(&ssk->tx_ring.head);
+	atomic_dec(&ssk->tx_ring.credits);
+	atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));
+
	return;
err:
	struct ib_device *dev;
	struct sdp_buf *tx_req;
	struct sk_buff *skb = NULL;
-	int i, frags;
	struct sdp_tx_ring *tx_ring = &ssk->tx_ring;
	if (unlikely(mseq != ring_tail(*tx_ring))) {
		printk(KERN_WARNING "Bogus send completion id %d tail %d\n",
	dev = ssk->ib_device;
	tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
	skb = tx_req->skb;
-	ib_dma_unmap_single(dev, tx_req->mapping[0], skb->len - skb->data_len,
-			    DMA_TO_DEVICE);
-	frags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < frags; ++i) {
-		ib_dma_unmap_page(dev, tx_req->mapping[i + 1],
-				  skb_shinfo(skb)->frags[i].size,
-				  DMA_TO_DEVICE);
-	}
+
+	sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len,
+			DMA_TO_DEVICE);
	tx_ring->una_seq += SDP_SKB_CB(skb)->end_seq;