}
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
- --ssk->bufs;
+ --ssk->tx_credits;
ssk->remote_credits = ssk->rx_head - ssk->rx_tail;
if (unlikely(rc)) {
sdp_dbg(&ssk->isk.sk, "ib_post_send failed with status %d.\n", rc);
++ssk->tx_tail;
- /* TODO: AIO and real zcopy cdoe; add their context support here */
+ /* TODO: AIO and real zcopy code; add their context support here */
- bz = *(struct bzcopy_state **)skb->cb;
+ bz = BZCOPY_STATE(skb);
if (bz)
bz->busy--;
scale = 1;
while ((likely(ssk->rx_head - ssk->rx_tail < SDP_RX_SIZE) &&
- (ssk->rx_head - ssk->rx_tail - SDP_MIN_BUFS) *
+ (ssk->rx_head - ssk->rx_tail - SDP_MIN_TX_CREDITS) *
(SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE) +
ssk->rcv_nxt - ssk->copied_seq < sk->sk_rcvbuf * scale) ||
- unlikely(ssk->rx_head - ssk->rx_tail < SDP_MIN_BUFS))
+ unlikely(ssk->rx_head - ssk->rx_tail < SDP_MIN_TX_CREDITS))
sdp_post_recv(ssk);
}
int sdp_post_credits(struct sdp_sock *ssk)
{
- if (likely(ssk->bufs > 1) &&
+ if (likely(ssk->tx_credits > 1) &&
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
struct sk_buff *skb;
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
if (ssk->recv_request &&
ssk->rx_tail >= ssk->recv_request_head &&
- ssk->bufs >= SDP_MIN_BUFS &&
+ ssk->tx_credits >= SDP_MIN_TX_CREDITS &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF_ACK);
}
- while (ssk->bufs > SDP_MIN_BUFS &&
+ while (ssk->tx_credits > SDP_MIN_TX_CREDITS &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE &&
(skb = ssk->isk.sk.sk_send_head) &&
sdp_nagle_off(ssk, skb)) {
sdp_post_send(ssk, skb, SDP_MID_DATA);
}
- if (ssk->bufs == SDP_MIN_BUFS &&
+ if (ssk->tx_credits == SDP_MIN_TX_CREDITS &&
!ssk->sent_request &&
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
}
c = ssk->remote_credits;
- if (likely(c > SDP_MIN_BUFS))
+ if (likely(c > SDP_MIN_TX_CREDITS))
c *= 2;
if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- likely(ssk->bufs > 1) &&
+ likely(ssk->tx_credits > 1) &&
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
if (unlikely(ssk->sdp_disconnect) &&
!ssk->isk.sk.sk_send_head &&
- ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ ssk->tx_credits > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh),
printk(KERN_WARNING "SDP BUG! mseq %d != wrid %d\n",
ssk->mseq_ack, (int)wc->wr_id);
- SDPSTATS_HIST_LINEAR(credits_before_update, ssk->bufs);
- ssk->bufs = ntohl(h->mseq_ack) - ssk->tx_head + 1 +
+ SDPSTATS_HIST_LINEAR(credits_before_update, ssk->tx_credits);
+ ssk->tx_credits = ntohl(h->mseq_ack) - ssk->tx_head + 1 +
ntohs(h->bufs);
frags = skb_shinfo(skb)->nr_frags;
static int sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct sdp_bsdh *h;
skb = sdp_send_completion(ssk, wc->wr_id);
sdp_add_sock(sdp_sk(child));
- sdp_sk(child)->max_bufs = sdp_sk(child)->bufs = ntohs(h->bsdh.bufs);
- sdp_sk(child)->min_bufs = sdp_sk(child)->bufs / 4;
+ sdp_sk(child)->max_bufs = sdp_sk(child)->tx_credits = ntohs(h->bsdh.bufs);
+ sdp_sk(child)->min_bufs = sdp_sk(child)->tx_credits / 4;
sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) -
sizeof(struct sdp_bsdh);
sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) /
PAGE_SIZE;
sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size);
- sdp_dbg(child, "%s recv_frags: %d bufs %d xmit_size_goal %d send trigger %d\n",
+
+ sdp_dbg(child, "%s recv_frags: %d tx credits %d xmit_size_goal %d send trigger %d\n",
__func__,
sdp_sk(child)->recv_frags,
- sdp_sk(child)->bufs,
+ sdp_sk(child)->tx_credits,
sdp_sk(child)->xmit_size_goal,
sdp_sk(child)->min_bufs);
h = event->param.conn.private_data;
SDP_DUMP_PACKET(sk, "RX", NULL, &h->bsdh);
- sdp_sk(sk)->max_bufs = sdp_sk(sk)->bufs = ntohs(h->bsdh.bufs);
- sdp_sk(sk)->min_bufs = sdp_sk(sk)->bufs / 4;
+ sdp_sk(sk)->max_bufs = sdp_sk(sk)->tx_credits = ntohs(h->bsdh.bufs);
+ sdp_sk(sk)->min_bufs = sdp_sk(sk)->tx_credits / 4;
sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) -
sizeof(struct sdp_bsdh);
sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) /
- sdp_dbg(sk, "%s bufs %d xmit_size_goal %d send_frags: %d send trigger %d\n",
+ sdp_dbg(sk, "%s tx credits %d xmit_size_goal %d send_frags: %d send trigger %d\n",
__func__,
- sdp_sk(sk)->bufs,
+ sdp_sk(sk)->tx_credits,
sdp_sk(sk)->xmit_size_goal,
sdp_sk(sk)->send_frags,
sdp_sk(sk)->min_bufs);
{
int min_free;
- min_free = SDP_TX_SIZE - (ssk->tx_head - ssk->tx_tail);
- if (ssk->bufs < min_free)
- min_free = ssk->bufs;
- min_free -= (min_free < SDP_MIN_BUFS) ? min_free : SDP_MIN_BUFS;
+ min_free = MIN(ssk->tx_credits, SDP_TX_SIZE - (ssk->tx_head - ssk->tx_tail));
+ if (min_free < SDP_MIN_TX_CREDITS)
+ return 0;
- return min_free;
+ return min_free - SDP_MIN_TX_CREDITS;
};
/* like sk_stream_memory_free - except measures remote credits */
struct sock *sk = &ssk->isk.sk;
struct socket *sock = sk->sk_socket;
- if (ssk->bufs >= ssk->min_bufs &&
+ if (ssk->tx_credits >= ssk->min_bufs &&
ssk->tx_head == ssk->tx_tail &&
sock != NULL) {
clear_bit(SOCK_NOSPACE, &sock->flags);
if (!sk->sk_send_head ||
(copy = size_goal - skb->len) <= 0 ||
- bz != *(struct bzcopy_state **)skb->cb) {
-
+ bz != BZCOPY_STATE(skb)) {
new_segment:
/*
* Allocate a new segment
if (!skb)
goto wait_for_memory;
- *((struct bzcopy_state **)skb->cb) = bz;
+ BZCOPY_STATE(skb) = bz;
/*
* Check whether we can use HW checksum.