/* set when SrcAvail received, reset when SendSM/RdmaRdCompl sent */
struct rx_srcavail_state *rx_sa;
-
+ int sa_post_sendsm; /* Need to send SendSM */
+ int sa_post_rdma_rd_compl; /* Number of finished RDMA read bytes not reported */
+ /* If > 0, need to send RdmaRdCompl */
u32 sa_cancel_mseq;
int sa_cancel_arrived; /* is 'sa_cancel_mseq' relevant or not, sticky */
int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed);
void sdp_skb_entail(struct sock *sk, struct sk_buff *skb);
void sdp_start_cma_timewait_timeout(struct sdp_sock *ssk, int timeo);
-int sdp_abort_rx_srcavail(struct sock *sk);
+int sdp_abort_rx_srcavail(struct sock *sk, int post_sendsm);
extern struct rw_semaphore device_removal_lock;
/* sdp_proc.c */
void sdp_tx_ring_destroy(struct sdp_sock *ssk);
int sdp_xmit_poll(struct sdp_sock *ssk, int force);
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb);
-void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp);
+int sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp);
void sdp_nagle_timeout(unsigned long data);
void sdp_post_keepalive(struct sdp_sock *ssk);
somebody_is_waiting(sk);
}
-void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
+int sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
{
/* TODO: nonagle? */
struct sk_buff *skb;
int post_count = 0;
struct sock *sk = sk_ssk(ssk);
+ int min_credits = SDP_MIN_TX_CREDITS;
+
+ if (ssk->rx_sa)
+ min_credits += 2; /* Save 2 credits, one for RdmaRdCompl and one for SendSM */
if (unlikely(!ssk->id)) {
if (sk->sk_send_head) {
/* TODO: flush send queue? */
sdp_reset(sk);
}
- return;
+ return -ECONNRESET;
}
again:
if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
sdp_xmit_poll(ssk, 1);
/* Run out of credits, check if got a credit update */
- if (unlikely(tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
+ if (unlikely(tx_credits(ssk) <= min_credits)) {
sdp_poll_rx_cq(ssk);
if (unlikely(sdp_should_rearm(sk) || !posts_handler(ssk)))
sdp_arm_rx_cq(sk);
}
+ if (unlikely((ssk->sa_post_rdma_rd_compl || ssk->sa_post_sendsm) &&
+ tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
+ sdp_warn(sk, "Run out of credits, can't abort SrcAvail. "
+ "RdmaRdCompl: %d SendSm: %d\n",
+ ssk->sa_post_rdma_rd_compl, ssk->sa_post_sendsm);
+ }
+
+ if (ssk->sa_post_rdma_rd_compl && tx_credits(ssk) > SDP_MIN_TX_CREDITS) {
+ int unreported = ssk->sa_post_rdma_rd_compl;
+
+ skb = sdp_alloc_skb_rdmardcompl(sk, unreported, 0);
+ if (!skb)
+ goto no_mem;
+ sdp_post_send(ssk, skb);
+ post_count++;
+ ssk->sa_post_rdma_rd_compl = 0;
+ }
+
+ if (ssk->sa_post_sendsm && tx_credits(ssk) > SDP_MIN_TX_CREDITS) {
+ skb = sdp_alloc_skb_sendsm(sk, 0);
+ if (unlikely(!skb))
+ goto no_mem;
+ sdp_post_send(ssk, skb);
+ ssk->sa_post_sendsm = 0;
+ post_count++;
+ }
+
if (ssk->recv_request &&
ring_tail(ssk->rx_ring) >= ssk->recv_request_head &&
- tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
+ tx_credits(ssk) >= min_credits &&
sdp_tx_ring_slots_left(ssk)) {
skb = sdp_alloc_skb_chrcvbuf_ack(sk,
ssk->recv_frags * PAGE_SIZE, gfp);
- if (likely(skb)) {
- ssk->recv_request = 0;
- sdp_post_send(ssk, skb);
- post_count++;
- }
+ if (!skb)
+ goto no_mem;
+ ssk->recv_request = 0;
+ sdp_post_send(ssk, skb);
+ post_count++;
}
- if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
+ if (tx_credits(ssk) <= min_credits &&
sdp_tx_ring_slots_left(ssk) &&
sk->sk_send_head &&
sdp_nagle_off(ssk, sk->sk_send_head)) {
SDPSTATS_COUNTER_INC(send_miss_no_credits);
}
- while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
+ while (tx_credits(ssk) > min_credits &&
sdp_tx_ring_slots_left(ssk) &&
(skb = sk->sk_send_head) &&
sdp_nagle_off(ssk, skb)) {
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
skb = sdp_alloc_skb_data(sk, 0, gfp);
- if (likely(skb)) {
- sdp_post_send(ssk, skb);
- SDPSTATS_COUNTER_INC(post_send_credits);
- post_count++;
- }
+ if (!skb)
+ goto no_mem;
+ sdp_post_send(ssk, skb);
+ SDPSTATS_COUNTER_INC(post_send_credits);
+ post_count++;
}
/* send DisConn if needed
!sk->sk_send_head &&
tx_credits(ssk) > 1) {
skb = sdp_alloc_skb_disconnect(sk, gfp);
- if (likely(skb)) {
- ssk->sdp_disconnect = 0;
- sdp_post_send(ssk, skb);
- post_count++;
- }
+ if (!skb)
+ goto no_mem;
+ ssk->sdp_disconnect = 0;
+ sdp_post_send(ssk, skb);
+ post_count++;
}
if (!sdp_tx_ring_slots_left(ssk) || post_count) {
if (sdp_xmit_poll(ssk, 1))
goto again;
}
+
+no_mem:
+ return post_count;
}
/* QP is destroyed, so no one will queue skbs anymore. */
if (ssk->rx_sa)
- sdp_abort_rx_srcavail(sk);
+ sdp_abort_rx_srcavail(sk, 0);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&ssk->rx_ctl_q);
if (h->mid == SDP_MID_DISCONN) {
sdp_handle_disconn(sk);
} else {
- if (h->mid == SDP_MID_SRCAVAIL && sdp_sk(sk)->rx_sa) {
- sdp_abort_rx_srcavail(sk);
- sdp_post_sendsm(sk);
- }
+ if (h->mid == SDP_MID_SRCAVAIL && sdp_sk(sk)->rx_sa)
+ sdp_abort_rx_srcavail(sk, 1);
sdp_dbg(sk, "Data was unread. skb: %p\n", skb);
data_was_unread = 1;
return err;
}
-int sdp_abort_rx_srcavail(struct sock *sk)
+int sdp_abort_rx_srcavail(struct sock *sk, int post_sendsm)
{
struct sdp_sock *ssk = sdp_sk(sk);
struct sdp_bsdh *h =
h->mid = SDP_MID_DATA;
- if (sdp_post_rdma_rd_compl(sk, ssk->rx_sa)) {
- sdp_warn(sk, "Couldn't send RdmaRdComp - "
- "data corruption might occur\n");
- }
+ sdp_post_rdma_rd_compl(sk, ssk->rx_sa);
+ if (post_sendsm)
+ sdp_post_sendsm(sk);
+ sdp_do_posts(ssk);
RX_SRCAVAIL_STATE(ssk->rx_sa->skb) = NULL;
kfree(ssk->rx_sa);
u32 peek_seq;
u32 *seq;
int copied = 0;
- int rc;
int avail_bytes_count = 0; /* Could be inlined in skb */
/* or advertised for RDMA */
SDPSTATS_COUNTER_INC(recvmsg);
sdp_dbg_data(sk, "Aborting SA "
"due to SACancel or "
"no fmr pool\n");
- sdp_abort_rx_srcavail(sk);
- sdp_post_sendsm(sk);
+ sdp_abort_rx_srcavail(sk, 1);
rx_sa = NULL;
check_srcavail_skb:
if (offset < skb->len) {
SDP_SKB_CB(skb)->seq;
sdp_dbg_data(sk, "Peek on RDMA data - "
"fallback to BCopy\n");
- sdp_abort_rx_srcavail(sk);
- sdp_post_sendsm(sk);
+ sdp_abort_rx_srcavail(sk, 1);
rx_sa = NULL;
if (real_offset >= skb->len)
goto force_skb_cleanup;
if (unlikely(err)) {
/* ssk->rx_sa might had been freed when
* we slept. */
- if (ssk->rx_sa) {
- sdp_abort_rx_srcavail(sk);
- sdp_post_sendsm(sk);
- }
+ if (ssk->rx_sa)
+ sdp_abort_rx_srcavail(sk, 1);
rx_sa = NULL;
if (err == -EAGAIN || err == -ETIME)
goto skb_cleanup;
len -= used;
*seq += used;
offset = *seq - SDP_SKB_CB(skb)->seq;
- sdp_dbg_data(sk, "done copied %d target %d\n", copied, target);
+ sdp_dbg_data(sk, "done copied 0x%x target 0x%x\n", copied, target);
sdp_do_posts(sdp_sk(sk));
if (rx_sa && !ssk->rx_sa) {
if (rx_sa && !(flags & MSG_PEEK)) {
- rc = sdp_post_rdma_rd_compl(sk, rx_sa);
- if (unlikely(rc)) {
- sdp_abort_rx_srcavail(sk);
+
+ if (likely(tx_credits(ssk) > (SDP_MIN_TX_CREDITS + 2))) {
+ sdp_post_rdma_rd_compl(sk, rx_sa);
+ sdp_post_sends(ssk, 0);
+ } else {
+ sdp_dbg_data(sk, "Run out of credits. Aborting RX "
+ "SrcAvail - or else won't be able to "
+ "send RdmaRdCompl/SendSM\n");
+ sdp_abort_rx_srcavail(sk, 1);
rx_sa = NULL;
- err = rc;
- goto out;
}
}
/* ssk->rx_sa might had been freed when we slept.
*/
if (ssk->rx_sa)
- sdp_abort_rx_srcavail(sk);
+ sdp_abort_rx_srcavail(sk, 0);
rx_sa = NULL;
}
force_skb_cleanup:
case SDP_MID_SRCAVAIL_CANCEL:
if (ssk->rx_sa && after(ntohl(h->mseq), ssk->rx_sa->mseq) &&
!ssk->tx_ring.rdma_inflight) {
- sdp_abort_rx_srcavail(sk);
- sdp_post_sendsm(sk);
+ sdp_abort_rx_srcavail(sk, 1);
}
break;
case SDP_MID_SINKAVAIL:
sdp_dbg_data(sk, "SrcAvail in the middle of another SrcAvail. Aborting\n");
h->mid = SDP_MID_DATA;
sdp_post_sendsm(sk);
+ sdp_do_posts(ssk);
} else {
skb_pull(skb, sizeof(struct sdp_srcah));
}
/*
 * Record that 'unreported' bytes of a SrcAvail RDMA read have completed and
 * must be acknowledged to the peer with an RdmaRdCompl message.
 *
 * The message itself is NOT built here: the byte count is accumulated into
 * ssk->sa_post_rdma_rd_compl and the actual skb is allocated and posted
 * later by sdp_post_sends(), which only sends once enough tx credits are
 * available.  This deferral is why the function can no longer fail with
 * -ENOMEM, unlike the previous entail-an-skb-immediately scheme.
 *
 * @sk:    the socket owning the tx path
 * @rx_sa: receive-side SrcAvail state; ->copied / ->reported track how many
 *         bytes were consumed locally vs. acknowledged to the peer
 *
 * Returns 0 always (kept as int for call-site compatibility).
 */
int sdp_post_rdma_rd_compl(struct sock *sk, struct rx_srcavail_state *rx_sa)
{
	int unreported = rx_sa->copied - rx_sa->reported;

	/* Nothing new consumed since the last report - nothing to ack. */
	if (rx_sa->copied <= rx_sa->reported)
		return 0;

	/* Defer the RdmaRdCompl: sdp_post_sends() will emit it when
	 * credits permit. */
	sdp_sk(sk)->sa_post_rdma_rd_compl += unreported;
	rx_sa->reported += unreported;

	return 0;
}
int sdp_post_sendsm(struct sock *sk)
{
- struct sk_buff *skb = sdp_alloc_skb_sendsm(sk, 0);
-
- if (unlikely(!skb))
- return -ENOMEM;
-
- sdp_skb_entail(sk, skb);
+ struct sdp_sock *ssk = sdp_sk(sk);
- sdp_post_sends(sdp_sk(sk), 0);
+ ssk->sa_post_sendsm = 1;
return 0;
}