module_param_named(rcvbuf_scale, rcvbuf_scale, int, 0644);
MODULE_PARM_DESC(rcvbuf_scale, "Receive buffer size scale factor.");
+static int top_mem_usage = 0;
+module_param_named(top_mem_usage, top_mem_usage, int, 0644);
+MODULE_PARM_DESC(top_mem_usage, "Top system wide sdp memory usage for recv (in MB).");
+
+atomic_t current_mem_usage;
+
/* Like tcp_fin */
static void sdp_fin(struct sock *sk)
{
sdp_dbg(&ssk->isk.sk, "ib_post_recv failed with status %d\n", rc);
sdp_reset(&ssk->isk.sk);
}
+
+ atomic_add(SDP_MAX_SEND_SKB_FRAGS, &current_mem_usage);
}
void sdp_post_recvs(struct sdp_sock *ssk)
{
+ int scale = rcvbuf_scale;
if (unlikely(!ssk->id))
return;
+ if (top_mem_usage &&
+ (top_mem_usage * 0x100000) < atomic_read(&current_mem_usage) * PAGE_SIZE)
+ scale = 1;
+
while ((likely(ssk->rx_head - ssk->rx_tail < SDP_RX_SIZE) &&
(ssk->rx_head - ssk->rx_tail - SDP_MIN_BUFS) *
(SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE) +
ssk->rcv_nxt - ssk->copied_seq <
- ssk->isk.sk.sk_rcvbuf * rcvbuf_scale) ||
+ ssk->isk.sk.sk_rcvbuf * scale) ||
unlikely(ssk->rx_head - ssk->rx_tail < SDP_MIN_BUFS))
sdp_post_recv(ssk);
}
if (unlikely(!skb))
return;
+ atomic_sub(SDP_MAX_SEND_SKB_FRAGS, &current_mem_usage);
+
if (unlikely(wc->status)) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
sdp_dbg(&ssk->isk.sk,
static struct list_head sock_list;
static spinlock_t sock_list_lock;
+extern atomic_t current_mem_usage;
+
inline void sdp_add_sock(struct sdp_sock *ssk)
{
spin_lock_irq(&sock_list_lock);
skb = sdp_recv_completion(ssk, ssk->rx_tail);
if (!skb)
break;
+ atomic_sub(SDP_MAX_SEND_SKB_FRAGS, &current_mem_usage);
__kfree_skb(skb);
}
while (ssk->tx_head != ssk->tx_tail) {
sdp_proc_init();
+ atomic_set(&current_mem_usage, 0);
+
return 0;
}
BUG_ON(!list_empty(&sock_list));
+ if (atomic_read(&current_mem_usage))
+ printk(KERN_WARNING "%s: current mem usage %d\n", __func__,
+ atomic_read(&current_mem_usage));
+
sdp_proc_unregister();
}