sdp_sk(sk)->max_bufs = ntohs(h->bsdh.bufs);
atomic_set(&sdp_sk(sk)->tx_ring.credits, sdp_sk(sk)->max_bufs);
sdp_sk(sk)->min_bufs = tx_credits(sdp_sk(sk)) / 4;
- sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) - SDP_HEAD_SIZE;
+ sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) - sizeof(struct sdp_bsdh);
sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) /
PAGE_SIZE, MAX_SKB_FRAGS) + 1; /* The +1 is to compensate on not aligned buffers */
sdp_sk(sk)->xmit_size_goal = MIN(sdp_sk(sk)->xmit_size_goal,
hh.majv_minv = SDP_MAJV_MINV;
sdp_init_buffers(sdp_sk(sk), rcvbuf_initial_size);
hh.localrcvsz = hh.desremrcvsz = htonl(sdp_sk(sk)->recv_frags *
- PAGE_SIZE + SDP_HEAD_SIZE);
+ PAGE_SIZE + sizeof(struct sdp_bsdh));
hh.max_adverts = 0x1;
inet_sk(sk)->saddr = inet_sk(sk)->rcv_saddr =
((struct sockaddr_in *)&id->route.addr.src_addr)->sin_addr.s_addr;
hah.majv_minv = SDP_MAJV_MINV;
hah.ext_max_adverts = 1; /* Doesn't seem to be mandated by spec,
but just in case */
- hah.actrcvsz = htonl(sdp_sk(child)->recv_frags * PAGE_SIZE + SDP_HEAD_SIZE);
+ hah.actrcvsz = htonl(sdp_sk(child)->recv_frags * PAGE_SIZE +
+ sizeof(struct sdp_bsdh));
memset(&conn_param, 0, sizeof conn_param);
conn_param.private_data_len = sizeof hah;
conn_param.private_data = &hah;
credits_before, tx_credits(ssk));
if (posts_handler(ssk) ||
- (!skb_queue_empty(&ssk->rx_ctl_q) &&
- (sk->sk_socket && (sk->sk_socket->flags & SOCK_ASYNC_WAITDATA)))) {
+ (sk->sk_socket && test_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags))) {
sdp_prf(&ssk->isk.sk, NULL, "Somebody is doing the post work for me. %d",
posts_handler(ssk));
if (unlikely(wc->status)) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
struct sock *sk = &ssk->isk.sk;
+ sdp_prf(sk, skb, "Send completion with error. "
+ "Status %d", wc->status);
sdp_warn(sk, "Send completion with error. "
"Status %d\n", wc->status);
sdp_set_error(sk, -ECONNRESET);