extern struct proto sdp_proto;
extern struct workqueue_struct *rx_comp_wq;
extern atomic_t sdp_current_mem_usage;
+extern int top_mem_usage;
extern spinlock_t sdp_large_sockets_lock;
extern struct ib_client sdp_client;
#ifdef SDPSTATS_ON
#endif
}
+/*
+ * sdp_has_free_mem() - check the system-wide SDP receive-memory budget.
+ *
+ * Returns nonzero when SDP may allocate more receive pages.
+ * top_mem_usage is a module parameter expressed in MB (0 == unlimited);
+ * sdp_current_mem_usage is a global page counter, so the MB limit is
+ * converted to pages via << (20 - PAGE_SHIFT).  Note the precedence:
+ * << binds tighter than <, so the shift applies to top_mem_usage only,
+ * as intended.
+ */
+static inline int sdp_has_free_mem(void)
+{
+ /* TODO: also account for kmalloc and skb allocations. */
+
+ return !top_mem_usage || atomic_read(&sdp_current_mem_usage) <
+ top_mem_usage << (20 - PAGE_SHIFT);
+}
+
/* utilities */
/*
 * mid2str() - map an SDP BSDH message id to its printable name.
 *
 * NOTE(review): the body indexes an identifier also named mid2str --
 * presumably a static string lookup table whose definition is elided
 * from this hunk; confirm against the full file.  No bounds check is
 * performed here, so callers must pass a valid mid.
 */
static inline char *mid2str(int mid)
{
return mid2str[mid];
}
+/*
+ * sdp_free_skb() - free an skb and release its page frags from the
+ * global SDP memory accounting.
+ *
+ * Every page frag attached to a receive skb is counted in
+ * sdp_current_mem_usage when allocated (see the atomic_inc/atomic_add
+ * call sites elsewhere in this patch), so subtract nr_frags before
+ * handing the skb back with __kfree_skb().  Centralizing this here
+ * replaces the scattered atomic_sub + __kfree_skb pairs the rest of
+ * the patch removes.
+ */
+static inline void sdp_free_skb(struct sk_buff *skb)
+{
+ if (unlikely(skb_shinfo(skb)->nr_frags))
+ atomic_sub(skb_shinfo(skb)->nr_frags, &sdp_current_mem_usage);
+
+ __kfree_skb(skb);
+}
+
static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size,
gfp_t gfp)
{
if (sk->sk_sndmsg_page) {
__free_page(sk->sk_sndmsg_page);
sk->sk_sndmsg_page = NULL;
+ atomic_dec(&sdp_current_mem_usage);
}
id = ssk->id;
sdp_dbg(sk, "Data was unread. skb: %p\n", skb);
data_was_unread = 1;
}
- __kfree_skb(skb);
+ sdp_free_skb(skb);
}
sk_mem_reclaim(sk);
if (!page) {
/* Allocate new cache page. */
- if (!(page = sk_stream_alloc_page(sk)))
+ if (sdp_has_free_mem()) {
+ page = sk_stream_alloc_page(sk);
+ if (!page)
+ return SDP_DO_WAIT_MEM;
+ atomic_inc(&sdp_current_mem_usage);
+ } else
return SDP_DO_WAIT_MEM;
}
goto skb_cleanup;
sdp_warn(sk, "err from rdma %d - sendSM\n", err);
skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
+ sdp_free_skb(skb);
}
} else {
sdp_dbg_data(sk, "memcpy 0x%lx bytes +0x%x -> %p\n",
force_skb_cleanup:
sdp_dbg_data(sk, "unlinking skb %p\n", skb);
skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
+ sdp_free_skb(skb);
}
continue;
found_fin_ok:
++*seq;
if (!(flags & MSG_PEEK)) {
skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
+ sdp_free_skb(skb);
}
break;
"Receive buffer initial size in bytes.");
SDP_MODPARAM_SINT(rcvbuf_scale, 0x10,
"Receive buffer size scale factor.");
-SDP_MODPARAM_SINT(top_mem_usage, 0,
+SDP_MODPARAM_INT(top_mem_usage, 0,
"Top system wide sdp memory usage for recv (in MB).");
#ifdef CONFIG_PPC
if (rx_req->mapping[i + 1])
page = rx_req->pages[i];
else {
+ if (unlikely(!sdp_has_free_mem()))
+ goto err;
rx_req->pages[i] = page = alloc_pages(gfp_page, 0);
if (unlikely(!page))
goto err;
return 0;
err:
+ atomic_add(pages_alloced, &sdp_current_mem_usage);
sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE, DMA_FROM_DEVICE);
- __kfree_skb(skb);
+ sdp_free_skb(skb);
sdp_reset(&ssk->isk.sk);
return -1;
}
{
struct sock *sk = &ssk->isk.sk;
int buffer_size = SDP_SKB_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
- unsigned long max_bytes;
+ unsigned long max_bytes = ssk->rcvbuf_scale;
unsigned long bytes_in_process;
int posted = rx_ring_posted(ssk);
- if (unlikely(!ssk->qp_active))
+ if (unlikely(!ssk->qp_active || !sdp_has_free_mem()))
return 0;
if (likely(posted >= SDP_RX_SIZE))
/* If rcvbuf is very small, must leave at least 1 skb for data,
* in addition to SDP_MIN_TX_CREDITS */
- max_bytes = max(sk->sk_rcvbuf, (1 + SDP_MIN_TX_CREDITS) * buffer_size);
-
- if (!top_mem_usage || (top_mem_usage * 0x100000) >=
- atomic_read(&sdp_current_mem_usage) * PAGE_SIZE) {
- max_bytes *= ssk->rcvbuf_scale;
- }
+ max_bytes *= max(sk->sk_rcvbuf, (1 + SDP_MIN_TX_CREDITS) * buffer_size);
/* Bytes posted to HW */
bytes_in_process = (posted - SDP_MIN_TX_CREDITS) * buffer_size;
sdp_warn(sk, "SDP: FIXME MID %d\n", h->mid);
}
- __kfree_skb(skb);
+ sdp_free_skb(skb);
return 0;
}
if (unlikely(h->mid == SDP_MID_DATA && skb->len == 0)) {
/* Credit update is valid even after RCV_SHUTDOWN */
- __kfree_skb(skb);
+ sdp_free_skb(skb);
return 0;
}
sdp_dbg_data(sk, "RdmaRdCompl message arrived\n");
sdp_handle_rdma_read_compl(ssk, ntohl(h->mseq_ack),
ntohl(rrch->len));
- __kfree_skb(skb);
+ sdp_free_skb(skb);
} else
skb_queue_tail(&ssk->rx_ctl_q, skb);
if (unlikely(!skb))
return NULL;
- if (unlikely(skb_shinfo(skb)->nr_frags))
- atomic_sub(skb_shinfo(skb)->nr_frags, &sdp_current_mem_usage);
-
if (unlikely(wc->status)) {
if (ssk->qp_active) {
sdp_dbg(sk, "Recv completion with error. "
sdp_reset(sk);
ssk->qp_active = 0;
}
- __kfree_skb(skb);
+ sdp_free_skb(skb);
return NULL;
}
if (unlikely(wc->byte_len < sizeof(struct sdp_bsdh))) {
sdp_warn(sk, "SDP BUG! byte_len %d < %zd\n",
wc->byte_len, sizeof(struct sdp_bsdh));
- __kfree_skb(skb);
+ sdp_free_skb(skb);
return NULL;
}
skb->len = wc->byte_len;
skb = sdp_recv_completion(ssk, ring_tail(ssk->rx_ring), INT_MAX);
if (!skb)
break;
- atomic_sub(skb_shinfo(skb)->nr_frags, &sdp_current_mem_usage);
- __kfree_skb(skb);
+ sdp_free_skb(skb);
}
for (id = 0; id < SDP_RX_SIZE; id++) {
DMA_FROM_DEVICE);
sbuf->mapping[i] = 0;
put_page(sbuf->pages[i - 1]);
- atomic_sub(1, &sdp_current_mem_usage);
+ atomic_dec(&sdp_current_mem_usage);
}
}
}