void sdp_time_wait_destroy_sk(struct sdp_sock *ssk);
void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
void sdp_work(struct work_struct *work);
+int sdp_post_credits(struct sdp_sock *ssk);
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
void sdp_post_recvs(struct sdp_sock *ssk);
int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq);
		(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH);
}
+int sdp_post_credits(struct sdp_sock *ssk)
+{
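+	/*
+	 * Explicit credit update: locally posted receive buffers are
+	 * advertised to the peer in the bufs field of the BSDH.  Send
+	 * one only if more than one remote credit is left (this message
+	 * itself consumes a credit) and the tx ring has room.
+	 */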
+	if (likely(ssk->bufs > 1) &&
+	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+		struct sk_buff *skb;
+		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+					  sizeof(struct sdp_bsdh),
+					  GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
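+		/* Zero-payload SDP_MID_DATA message: the BSDH header alone
+		 * carries the updated credit count. */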
+		sdp_post_send(ssk, skb, SDP_MID_DATA);
+	}
+	return 0;
+}
+
void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
{
	/* TODO: nonagle? */
			rdma_reject(id, NULL, 0);
		else
			rc = rdma_accept(id, NULL);
+
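+		/* Connection accepted: push an initial credit update so the
+		 * peer learns about our posted receive buffers. */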
+		if (!rc)
+			rc = sdp_post_credits(sdp_sk(sk));
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		sdp_dbg(sk, "RDMA_CM_EVENT_CONNECT_ERROR\n");