IB/sdp: allow users to bound SDP's memory use via a module parameter
author	Amiram Perlmutter <amip@mellanox.co.il>
	Thu, 25 Jan 2007 09:53:25 +0000 (11:53 +0200)
committer	Mukesh Kacker <mukesh.kacker@oracle.com>
	Tue, 6 Oct 2015 12:04:03 +0000 (05:04 -0700)
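Add a top_mem_usage module parameter (in MB) that bounds system-wide SDP
receive memory. Posted receives are accounted in pages via the atomic
current_mem_usage counter; once current_mem_usage * PAGE_SIZE exceeds the
cap, sdp_post_recvs() falls back to a receive-buffer scale factor of 1
instead of rcvbuf_scale. A value of 0 (the default) leaves memory use
unbounded.

A usage sketch, assuming the module is built as ib_sdp (since the parameter
is registered with mode 0644 it should also be writable at runtime through
/sys/module/ib_sdp/parameters/top_mem_usage):

    # cap system-wide SDP receive memory at 256 MB
    modprobe ib_sdp top_mem_usage=256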
Signed-off-by: Amiram Perlmutter <amip@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_bcopy.c
drivers/infiniband/ulp/sdp/sdp_main.c

drivers/infiniband/ulp/sdp/sdp.h
index 21679de6835fabce0c53c3591145389292f6376b..068a8616d3b7b21095a8eb1725bed0f3032ed0b4 100644 (file)
@@ -142,6 +142,8 @@ struct sdp_sock {
 extern struct proto sdp_proto;
 extern struct workqueue_struct *sdp_workqueue;
 
+extern atomic_t current_mem_usage;
+
 /* just like TCP fs */
 struct sdp_seq_afinfo {
        struct module           *owner;
drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 6e4f10aa4cdae1978b00cab3e2d32ef1ff3dc94c..323f53052843fef732419e5f2bf9f8f40052a783 100644 (file)
@@ -41,6 +41,12 @@ static int rcvbuf_scale = 0x10;
 module_param_named(rcvbuf_scale, rcvbuf_scale, int, 0644);
 MODULE_PARM_DESC(rcvbuf_scale, "Receive buffer size scale factor.");
 
+static int top_mem_usage = 0;
+module_param_named(top_mem_usage, top_mem_usage, int, 0644);
+MODULE_PARM_DESC(top_mem_usage, "Top system wide sdp memory usage for recv (in MB).");
+
+atomic_t current_mem_usage;
+
 /* Like tcp_fin */
 static void sdp_fin(struct sock *sk)
 {
@@ -235,18 +241,25 @@ static void sdp_post_recv(struct sdp_sock *ssk)
                sdp_dbg(&ssk->isk.sk, "ib_post_recv failed with status %d\n", rc);
                sdp_reset(&ssk->isk.sk);
        }
+
+       atomic_add(SDP_MAX_SEND_SKB_FRAGS, &current_mem_usage);
 }
 
 void sdp_post_recvs(struct sdp_sock *ssk)
 {
+       int scale = rcvbuf_scale;
        if (unlikely(!ssk->id))
                return;
 
+       if (top_mem_usage &&
+           (top_mem_usage * 0x100000) < atomic_read(&current_mem_usage) * PAGE_SIZE)
+               scale = 1;
+
        while ((likely(ssk->rx_head - ssk->rx_tail < SDP_RX_SIZE) &&
                (ssk->rx_head - ssk->rx_tail - SDP_MIN_BUFS) *
                (SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE) +
                ssk->rcv_nxt - ssk->copied_seq <
-               ssk->isk.sk.sk_rcvbuf * rcvbuf_scale) ||
+               ssk->isk.sk.sk_rcvbuf * scale) ||
               unlikely(ssk->rx_head - ssk->rx_tail < SDP_MIN_BUFS))
                sdp_post_recv(ssk);
 }
@@ -400,6 +413,8 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
                if (unlikely(!skb))
                        return;
 
+               atomic_sub(SDP_MAX_SEND_SKB_FRAGS, &current_mem_usage);
+
                if (unlikely(wc->status)) {
                        if (wc->status != IB_WC_WR_FLUSH_ERR) {
                                sdp_dbg(&ssk->isk.sk,
drivers/infiniband/ulp/sdp/sdp_main.c
index a1b02404919d659fdf160d8522744370bde0fbab..2f940c12b88be97ad89e044adf6b39f57b97e5c5 100644 (file)
@@ -122,6 +122,8 @@ struct workqueue_struct *sdp_workqueue;
 static struct list_head sock_list;
 static spinlock_t sock_list_lock;
 
+extern atomic_t current_mem_usage;
+
 inline void sdp_add_sock(struct sdp_sock *ssk)
 {
        spin_lock_irq(&sock_list_lock);
@@ -191,6 +193,7 @@ static void sdp_destroy_qp(struct sdp_sock *ssk)
                        skb = sdp_recv_completion(ssk, ssk->rx_tail);
                        if (!skb)
                                break;
+                       atomic_sub(SDP_MAX_SEND_SKB_FRAGS, &current_mem_usage);
                        __kfree_skb(skb);
                }
                while (ssk->tx_head != ssk->tx_tail) {
@@ -1890,6 +1893,8 @@ static int __init sdp_init(void)
 
        sdp_proc_init();
 
+       atomic_set(&current_mem_usage, 0);
+
        return 0;
 }
 
@@ -1906,6 +1911,10 @@ static void __exit sdp_exit(void)
 
        BUG_ON(!list_empty(&sock_list));
 
+       if (atomic_read(&current_mem_usage))
+               printk(KERN_WARNING "%s: current mem usage %d\n", __func__,
+                      atomic_read(&current_mem_usage));
+
        sdp_proc_unregister();
 }