instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define SDP_TX_POLL_MODER 16
#define SDP_TX_POLL_TIMEOUT (HZ / 4)
+#define SDP_NAGLE_TIMEOUT (HZ / 10)
#define SDP_RESOLVE_TIMEOUT 1000
#define SDP_ROUTE_TIMEOUT 1000
unsigned max_bufs; /* Initial buffers offered by other side */
unsigned min_bufs; /* Low water mark to wake senders */
+ unsigned long nagle_last_unacked; /* mseq of latest unacked packet */
+ struct timer_list nagle_timer; /* timeout waiting for ack */
+
atomic_t remote_credits;
#define remote_credits(ssk) (atomic_read(&ssk->remote_credits))
int poll_cq;
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonagle);
#define sdp_post_sends(ssk, nonagle) _sdp_post_sends(__func__, __LINE__, ssk, nonagle)
+void sdp_nagle_timeout(unsigned long data);
/* sdp_rx.c */
void sdp_rx_ring_init(struct sdp_sock *ssk);
static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
{
-	return (ssk->nonagle & TCP_NAGLE_OFF) ||
+	/* Decide whether skb may be transmitted immediately (Nagle bypass):
+	 * nagle disabled, nothing already awaiting an ack, more data queued
+	 * behind this skb, skb already fills the send-size goal, or PSH set.
+	 * Otherwise the skb is held until an ack or the nagle timer fires. */
+	int send_now =
+		(ssk->nonagle & TCP_NAGLE_OFF) ||
+		!ssk->nagle_last_unacked ||
		skb->next != (struct sk_buff *)&ssk->isk.sk.sk_write_queue ||
		skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
-		(ring_tail(ssk->tx_ring) == ring_head(ssk->tx_ring) &&
-			!(ssk->nonagle & TCP_NAGLE_CORK)) ||
		(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH);
+
+	if (send_now) {
+		/* Record the mseq of this (soon to be) outstanding packet and
+		 * arm the timer so a lost/late ack cannot stall the queue. */
+		unsigned long mseq = ring_head(ssk->tx_ring);
+		ssk->nagle_last_unacked = mseq;
+
+		mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);
+		sdp_dbg_data(&ssk->isk.sk, "Starting nagle timer\n");
+	}
+	/* nagle_last_unacked is unsigned long - print with %lu, not %ld */
+	sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %lu\n",
+			send_now, ssk->nagle_last_unacked);
+
+	return send_now;
+}
+
+/* Timer callback: flush packets that Nagle held back but whose ack never
+ * arrived within SDP_NAGLE_TIMEOUT, so the write queue cannot stall. */
+void sdp_nagle_timeout(unsigned long data)
+{
+	struct sdp_sock *ssk = (struct sdp_sock *)data;
+	struct sock *sk = &ssk->isk.sk;
+
+	/* nagle_last_unacked is unsigned long - print with %lu, not %ld */
+	sdp_dbg_data(&ssk->isk.sk, "last_unacked = %lu\n", ssk->nagle_last_unacked);
+
+	/* Ack already arrived - nothing is being held back. */
+	if (!ssk->nagle_last_unacked)
+		return;
+
+	/* Only process if the socket is not in use */
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
+		/* Owner is active in process context; retry one period later. */
+		mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);
+		sdp_dbg_data(&ssk->isk.sk, "socket is busy - trying later\n");
+		goto out;
+	}
+
+	if (sk->sk_state == TCP_CLOSE)
+		goto out;
+
+	/* Behave as if the ack arrived: clear the marker and post sends. */
+	ssk->nagle_last_unacked = 0;
+	sdp_post_sends(ssk, 0);
+
+	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		sk_stream_write_space(&ssk->isk.sk);
+out:
+	bh_unlock_sock(sk);
 }
int sdp_post_credits(struct sdp_sock *ssk)
struct sdp_bsdh *h;
struct sock *sk = &ssk->isk.sk;
int credits_before;
+ unsigned long mseq_ack;
skb = sdp_recv_completion(ssk, wc->wr_id);
if (unlikely(!skb))
SDPSTATS_HIST_LINEAR(credits_before_update, tx_credits(ssk));
+ mseq_ack = ntohl(h->mseq_ack);
credits_before = tx_credits(ssk);
- atomic_set(&ssk->tx_ring.credits, ntohl(h->mseq_ack) - ring_head(ssk->tx_ring) + 1 +
+ atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) + 1 +
ntohs(h->bufs));
+ if (mseq_ack >= ssk->nagle_last_unacked)
+ ssk->nagle_last_unacked = 0;
sdp_prf(&ssk->isk.sk, skb, "RX %s bufs=%d c before:%d after:%d "
"mseq:%d, ack:%d", mid2str(h->mid), ntohs(h->bufs), credits_before,
{
struct sdp_buf *tx_req;
struct sdp_bsdh *h = (struct sdp_bsdh *)skb_push(skb, sizeof *h);
- unsigned mseq = ring_head(ssk->tx_ring);
+ unsigned long mseq = ring_head(ssk->tx_ring);
int i, rc, frags;
u64 addr;
struct ib_device *dev;
struct ib_sge *sge = ibsge;
struct ib_send_wr tx_wr = { 0 };
-
SDPSTATS_COUNTER_MID_INC(post_send, mid);
SDPSTATS_HIST(send_size, skb->len);
h->mseq = htonl(mseq);
h->mseq_ack = htonl(mseq_ack(ssk));
- sdp_prf(&ssk->isk.sk, skb, "TX: %s bufs: %d mseq:%d ack:%d",
+ sdp_prf(&ssk->isk.sk, skb, "TX: %s bufs: %d mseq:%ld ack:%d",
mid2str(mid), ring_posted(ssk->rx_ring), mseq, ntohl(h->mseq_ack));
SDP_DUMP_PACKET(&ssk->isk.sk, "TX", skb, h);
ssk->tx_ring.timer.data = (unsigned long) ssk;
ssk->tx_ring.poll_cnt = 0;
+ init_timer(&ssk->nagle_timer);
+ ssk->nagle_timer.function = sdp_nagle_timeout;
+ ssk->nagle_timer.data = (unsigned long) ssk;
+
return 0;
err_cq:
void sdp_tx_ring_destroy(struct sdp_sock *ssk)
{
+ del_timer(&ssk->nagle_timer);
+
if (ssk->tx_ring.buffer) {
sdp_tx_ring_purge(ssk);