#define SDP_SRCAVAIL_CANCEL_TIMEOUT (HZ * 5)
#define SDP_SRCAVAIL_ADV_TIMEOUT (1 * HZ)
-#define SDP_SRCAVAIL_PAYLOAD_LEN 1
#define SDP_RESOLVE_TIMEOUT 1000
#define SDP_ROUTE_TIMEOUT 1000
#define SDP_BZCOPY_POLL_TIMEOUT (HZ / 10)
#define SDP_AUTO_CONF 0xffff
-#define AUTO_MOD_DELAY (HZ / 4)
struct sdp_skb_cb {
__u32 seq; /* Starting sequence number */
atomic_t tail;
struct ib_cq *cq;
- int una_seq;
+ u32 una_seq;
atomic_t credits;
#define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
#define rcv_nxt(ssk) atomic_read(&(ssk->rcv_nxt))
atomic_t rcv_nxt;
- int write_seq;
+ u32 write_seq;
int xmit_size_goal;
int nonagle;
unsigned max_bufs; /* Initial buffers offered by other side */
unsigned min_bufs; /* Low water mark to wake senders */
- unsigned long nagle_last_unacked; /* mseq of lastest unacked packet */
+ u32 nagle_last_unacked; /* mseq of latest unacked packet */
struct timer_list nagle_timer; /* timeout waiting for ack */
atomic_t remote_credits;
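The type changes above (una_seq, write_seq, nagle_last_unacked moving to u32) matter because these counters wrap: unsigned arithmetic is well defined mod 2^32, while overflow of a signed int is undefined behavior. A minimal illustration of why distances stay correct across the wrap (the helper name is illustrative, not from the patch):

```c
/* With u32 counters, the byte count in flight survives wraparound:
 * e.g. una_seq = 0xfffffffe and write_seq = 8 (wrapped) still give
 * write_seq - una_seq == 10. Signed int arithmetic cannot promise this. */
static inline u32 sdp_bytes_in_flight(u32 write_seq, u32 una_seq)
{
	return write_seq - una_seq;
}
```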
sdp_dbg_data(&ssk->isk.sk, "Starting nagle timer\n");
}
}
- sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %ld\n",
+ sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %u\n",
send_now, ssk->nagle_last_unacked);
return send_now;
struct sock *sk = &ssk->isk.sk;
SDPSTATS_COUNTER_INC(nagle_timer);
- sdp_dbg_data(sk, "last_unacked = %ld\n", ssk->nagle_last_unacked);
+ sdp_dbg_data(sk, "last_unacked = %u\n", ssk->nagle_last_unacked);
if (!ssk->nagle_last_unacked)
goto out2;
sdp_post_sends(ssk, GFP_ATOMIC);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- sk_stream_write_space(&ssk->isk.sk);
+ sk_stream_write_space(sk);
out:
bh_unlock_sock(sk);
out2:
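For context, the timer handler above flushes packets that Nagle held back once nagle_last_unacked goes stale. A rough sketch of the arming side, under the assumption that small sends are deferred while an earlier packet is still unacked; sdp_nagle_send_now and SDP_NAGLE_TIMEOUT are illustrative names and values, not taken from this patch (the real predicate is sdp_nagle_off(), used below):

```c
#define SDP_NAGLE_TIMEOUT (HZ / 10)	/* illustrative value */

/* Sketch: hold back a small send while an earlier one is unacked,
 * and arm nagle_timer so the timeout handler eventually flushes it. */
static int sdp_nagle_send_now(struct sdp_sock *ssk, struct sk_buff *skb)
{
	int send_now = ssk->nonagle ||
		       !ssk->nagle_last_unacked ||
		       skb->len >= ssk->xmit_size_goal;

	if (!send_now && !timer_pending(&ssk->nagle_timer))
		mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);

	return send_now;
}
```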
struct sock *sk = &ssk->isk.sk;
if (unlikely(!ssk->id)) {
- if (ssk->isk.sk.sk_send_head) {
- sdp_dbg(&ssk->isk.sk,
- "Send on socket without cmid ECONNRESET.\n");
+ if (sk->sk_send_head) {
+ sdp_dbg(sk, "Send on socket without cmid ECONNRESET\n");
/* TODO: flush send queue? */
- sdp_reset(&ssk->isk.sk);
+ sdp_reset(sk);
}
return;
}
again:
if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
- sdp_xmit_poll(ssk, 1);
+ sdp_xmit_poll(ssk, 1);
/* Run out of credits, check if got a credit update */
if (unlikely(tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
sdp_tx_ring_slots_left(ssk) &&
- ssk->isk.sk.sk_send_head &&
- sdp_nagle_off(ssk, ssk->isk.sk.sk_send_head)) {
+ sk->sk_send_head &&
+ sdp_nagle_off(ssk, sk->sk_send_head)) {
SDPSTATS_COUNTER_INC(send_miss_no_credits);
}
while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
sdp_tx_ring_slots_left(ssk) &&
- (skb = ssk->isk.sk.sk_send_head) &&
+ (skb = sk->sk_send_head) &&
sdp_nagle_off(ssk, skb)) {
- update_send_head(&ssk->isk.sk, skb);
- __skb_dequeue(&ssk->isk.sk.sk_write_queue);
+ update_send_head(sk, skb);
+ __skb_dequeue(&sk->sk_write_queue);
sdp_post_send(ssk, skb);
}
if (credit_update_needed(ssk) &&
- likely((1 << ssk->isk.sk.sk_state) &
+ likely((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
- skb = sdp_alloc_skb_data(&ssk->isk.sk, 0, gfp);
+ skb = sdp_alloc_skb_data(sk, 0, gfp);
if (likely(skb)) {
sdp_post_send(ssk, skb);
SDPSTATS_COUNTER_INC(post_send_credits);
* messages that provide additional credits and also do not contain ULP
* payload. */
if (unlikely(ssk->sdp_disconnect) &&
- !ssk->isk.sk.sk_send_head &&
+ !sk->sk_send_head &&
tx_credits(ssk) > 1) {
skb = sdp_alloc_skb_disconnect(sk, gfp);
if (likely(skb)) {
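The "tx_credits(ssk) > 1" guard above implements the rule from the truncated comment: the last send credit is kept for a message that grants additional credits and carries no ULP payload, so the disconnect message may not consume it. A one-line restatement (the helper name is hypothetical):

```c
/* The final credit is reserved for a zero-payload, credit-granting
 * message, so anything else must leave at least one credit behind. */
static inline int sdp_may_consume_credit(struct sdp_sock *ssk)
{
	return tx_credits(ssk) > 1;
}
```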
SDP_MODPARAM_SINT(sdp_link_layer_ib_only, 1, "Support only link layer of "
"type Infiniband");
-enum {
- SDP_HH_SIZE = 76,
- SDP_HAH_SIZE = 180,
-};
-
static void sdp_qp_event_handler(struct ib_event *event, void *data)
{
}
conn_param.responder_resources = 4 /* TODO */;
conn_param.initiator_depth = 4 /* TODO */;
conn_param.retry_count = SDP_RETRY_COUNT;
- SDP_DUMP_PACKET(NULL, "TX", NULL, &hh.bsdh);
+ SDP_DUMP_PACKET(sk, "TX", NULL, &hh.bsdh);
rc = rdma_connect(id, &conn_param);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
goto out;
}
- if (ssk->tx_sa->mseq > mseq_ack) {
+ if (after(ssk->tx_sa->mseq, mseq_ack)) {
sdp_dbg_data(sk, "SendSM arrived for old SrcAvail. "
"SendSM mseq_ack: 0x%x, SrcAvail mseq: 0x%x\n",
mseq_ack, ssk->tx_sa->mseq);
goto out;
}
- if (ssk->tx_sa->mseq > mseq_ack) {
+ if (after(ssk->tx_sa->mseq, mseq_ack)) {
sdp_dbg_data(sk, "RdmaRdCompl arrived for old SrcAvail. "
"SendSM mseq_ack: 0x%x, SrcAvail mseq: 0x%x\n",
mseq_ack, ssk->tx_sa->mseq);
out:
spin_unlock_irqrestore(&ssk->tx_sa_lock, flags);
- return;
}
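The switch from a raw ">" to after() in the two staleness checks above keeps the comparison correct when mseq wraps. after() here is the classic TCP-style serial-number comparator (as in <net/tcp.h>), reproduced for reference:

```c
/* seq1 is "before" seq2 exactly when the mod-2^32 distance is
 * negative; after() is the mirror image. Safe across u32 wraparound. */
static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)
```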
static unsigned long sdp_get_max_memlockable_bytes(unsigned long offset)