net: rds: fix rds_ib_sysctl_max_recv_allocation error
author Zhu Yanjun <yanjun.zhu@oracle.com>
Wed, 26 Dec 2018 00:33:02 +0000 (19:33 -0500)
committer Brian Maly <brian.maly@oracle.com>
Tue, 8 Jan 2019 15:42:01 +0000 (10:42 -0500)
Before commit c682e8474bd4 ("net/rds: reduce memory footprint during
ib_post_recv in IB transport"), rds_ib_allocation was incremented one
page at a time. atomic_add_unless() only refuses the add when the
counter is exactly equal to the limit, so with single-page increments
the counter stops precisely at rds_ib_sysctl_max_recv_allocation and
the check works. After that commit, rds_ib_allocation grows by 4 pages
at a time when the fragment size is 16K, so the counter can step over
the limit without ever being equal to it, and atomic_add_unless() no
longer enforces the cap. Use atomic_add_return() instead and back the
addition out when the limit is exceeded.
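
The following is a minimal userspace sketch of the failure mode, not
driver code: the helper names and the limit value are made up for
illustration, and plain ints stand in for the kernel atomics. It
contrasts the old add-unless-equal check with the add-then-roll-back
scheme this patch switches to.

#include <stdio.h>

#define LIMIT 10	/* illustrative cap, stands in for the sysctl */

/* Mimics atomic_add_unless(): add 'step' unless the counter is already
 * exactly 'limit'; returns 1 if the add happened, 0 otherwise. */
static int add_unless_equal(int *counter, int step, int limit)
{
	if (*counter == limit)
		return 0;
	*counter += step;
	return 1;
}

/* Mimics the fixed scheme: add first, check the limit, roll back on
 * overshoot; returns 1 on success, 0 if the cap would be exceeded. */
static int add_with_rollback(int *counter, int step, int limit)
{
	*counter += step;
	if (*counter >= limit) {
		*counter -= step;
		return 0;
	}
	return 1;
}

int main(void)
{
	int old_way = 0, new_way = 0, i;

	/* Step by 4, the way a 16K frag adds 4 pages at a time. */
	for (i = 0; i < 5; i++) {
		add_unless_equal(&old_way, 4, LIMIT);	/* skips over 10, never refused */
		add_with_rollback(&new_way, 4, LIMIT);	/* held below the cap */
	}
	printf("add_unless_equal: %d, add_with_rollback: %d\n",
	       old_way, new_way);	/* prints 20, 8 */
	return 0;
}

With a step of 1 both schemes stop at the limit; with a step of 4 the
add-unless-equal counter blows through it, which is exactly what the
patch below prevents.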

Fixes: c682e8474bd4 ("net/rds: reduce memory footprint during ib_post_recv in IB transport")
Orabug: 28947481

Change-Id: Ib032cd170d28e403a888c86124b67892b25ed5a5
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Reported-by: Joe Jin <joe.jin@oracle.com>
Reviewed-by: Joe Jin <joe.jin@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
net/rds/ib_recv.c

index a0076edc5fc99df0724cee07e893b38dbd3b2a91..d07bc2b285ece33bdd2fe286fd9779dde96bc206 100644
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(rds_ib_srq_lwm_refill, "SRQ LWM refill");
 static struct kmem_cache *rds_ib_incoming_slab;
 static struct kmem_cache *rds_ib_frag_slab;
 static atomic_t        rds_ib_allocation = ATOMIC_INIT(0);
-static unsigned long rds_ib_allocation_warn = 1;
 
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
 {
@@ -357,7 +356,6 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
        struct list_head *cache_item;
        struct scatterlist *sg;
        struct scatterlist *s;
-       int avail_allocs;
        int ret;
        int i;
        int j;
@@ -368,21 +366,19 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
                atomic_sub(ic->i_frag_sz/1024, &ic->i_cache_allocs);
                rds_ib_stats_add(s_ib_recv_removed_from_cache, ic->i_frag_sz);
        } else {
-               frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
-               if (!frag)
+               if (unlikely(atomic_add_return(ic->i_frag_pages,
+                                              &rds_ib_allocation) >=
+                   rds_ib_sysctl_max_recv_allocation)) {
+                       printk_once(KERN_NOTICE "RDS/IB: WARNING - recv memory exceeded max_recv_allocation %d\n",
+                                   atomic_read(&rds_ib_allocation));
+                       atomic_sub(ic->i_frag_pages, &rds_ib_allocation);
+                       rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        return NULL;
+               }
 
-               avail_allocs = atomic_add_unless(&rds_ib_allocation,
-                                                ic->i_frag_pages,
-                                                rds_ib_sysctl_max_recv_allocation);
-               if (!avail_allocs) {
-                       if (test_and_clear_bit(0, &rds_ib_allocation_warn)) {
-                               printk(KERN_NOTICE "RDS/IB: WARNING - "
-                               "recv memory exceeded max_recv_allocation %d\n",
-                               atomic_read(&rds_ib_allocation));
-                       }
-                       rds_ib_stats_inc(s_ib_rx_alloc_limit);
-                       kmem_cache_free(rds_ib_frag_slab, frag);
+               frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
+               if (!frag) {
+                       atomic_sub(ic->i_frag_pages, &rds_ib_allocation);
                        return NULL;
                }