case SDP_MID_SRCAVAIL:
rx_sa = RX_SRCAVAIL_STATE(skb);
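+ /* a SrcAvail older than the last SrcAvailCancel (mseq is presumed monotonic) was cancelled by the peer */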
- if (rx_sa->flags & RX_SA_ABORTED) {
+ if (rx_sa->mseq < ssk->srcavail_cancel_mseq) {
sdp_dbg_data(sk, "Ignoring src avail "
"due to SrcAvailCancel\n");
sdp_post_sendsm(sk);
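+ /* SendSM asks the peer to abort this SrcAvail and fall back to the buffered send path */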
if (!rx_sa && used + offset < skb->len)
continue;
- if (rx_sa && !(rx_sa->flags & RX_SA_ABORTED) &&
- rx_sa->used < rx_sa->len)
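+ /* the current SrcAvail still has unconsumed data; keep receiving */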
+ if (rx_sa && rx_sa->used < rx_sa->len)
continue;
offset = 0;
seq_printf(seq, "ZCopy stats:\n");
seq_printf(seq, "- TX timeout\t\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_tx_timeout));
seq_printf(seq, "- TX cross send\t\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_cross_send));
- seq_printf(seq, "- TX aborted by peer\t\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_tx_aborted));
+ seq_printf(seq, "- TX aborted by peer\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_tx_aborted));
seq_printf(seq, "- TX error\t\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_tx_error));
return 0;
}
SDPSTATS_COUNTER_MID_INC(post_send, h->mid);
SDPSTATS_HIST(send_size, skb->len);
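+ /* don't post on a QP that is no longer active; drop the skb instead */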
+ if (!ssk->qp_active)
+ goto err;
+
ssk->tx_packets++;
ssk->tx_bytes += skb->len;
sdp_set_error(&ssk->isk.sk, -ECONNRESET);
wake_up(&ssk->wq);
}
+
+ return;
+
+err:
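+ /* the skb was never posted, so no TX completion will release it; free it here */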
+ __kfree_skb(skb);
}
static struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
- int got_srcavail_cancel;
int rc = 0;
int len = *used;
+ int copied;
sdp_dbg_data(&ssk->isk.sk, "preparing RDMA read."
" len: 0x%x. buffer len: 0x%lx\n", len, iov->iov_len);
goto err_post_send;
}
- /* Ignore any data copied after getting SrcAvailCancel */
- if (!got_srcavail_cancel) {
- int copied = rx_sa->umem->length;
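+ /* the RDMA read already placed this data in the user buffer, so account it even if a SrcAvailCancel arrived meanwhile */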
+ copied = rx_sa->umem->length;
- sdp_update_iov_used(sk, iov, copied);
- rx_sa->used += copied;
- atomic_add(copied, &ssk->rcv_nxt);
- *used = copied;
- }
+ sdp_update_iov_used(sk, iov, copied);
+ rx_sa->used += copied;
+ atomic_add(copied, &ssk->rcv_nxt);
+ *used = copied;
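+ /* RDMA read is done; clear the inflight marker */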
ssk->tx_ring.rdma_inflight = NULL;