www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
RDS: Fix potential race around rds_i[bw]_allocation
authorAndy Grover <andy.grover@oracle.com>
Thu, 13 Aug 2009 20:30:36 +0000 (13:30 -0700)
committerMukesh Kacker <mukesh.kacker@oracle.com>
Tue, 7 Jul 2015 23:41:18 +0000 (16:41 -0700)
From Shin Hong:

"At rds_ib_recv_refill_one(), it first executes atomic_read(&rds_ib_allocation)
for if-condition checking,

and then executes atomic_inc(&rds_ib_allocation) if the condition was
not satisfied.

However, if any other code which updates rds_ib_allocation executes
between these two atomic operation executions,
it seems that it may result in a race condition. (especially when
rds_ib_allocation + 1 == rds_ib_sysctl_max_recv_allocation)"

This patch fixes this by using atomic_add_unless to eliminate the
possibility of allocating more than rds_ib_sysctl_max_recv_allocation
and then decrementing the count if the allocation fails. It also
makes an identical change to the iwarp transport.

Reported-by: Shin Hong <hongshin@gmail.com>
Signed-off-by: Andy Grover <andy.grover@oracle.com>
net/rds/ib_recv.c
net/rds/iw_recv.c

index 1654d59a513b8847d8d39a3320bcf4efe78adc50..42b2206dce36355f839260463f4a525549925136 100644 (file)
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
        int ret = -ENOMEM;
 
        if (recv->r_ibinc == NULL) {
-               if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+               if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        goto out;
                }
                recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
                                                 kptr_gfp);
-               if (recv->r_ibinc == NULL)
+               if (recv->r_ibinc == NULL) {
+                       atomic_dec(&rds_ib_allocation);
                        goto out;
-               atomic_inc(&rds_ib_allocation);
+               }
                INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
                rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
        }
index fde470fa50d5457c72226d0336e00aeed8df534d..b4447432d0f3cde605bf7beae8b31eea62ad9492 100644 (file)
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
        int ret = -ENOMEM;
 
        if (recv->r_iwinc == NULL) {
-               if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+               if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
                        rds_iw_stats_inc(s_iw_rx_alloc_limit);
                        goto out;
                }
                recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
                                                 kptr_gfp);
-               if (recv->r_iwinc == NULL)
+               if (recv->r_iwinc == NULL) {
+                       atomic_dec(&rds_iw_allocation);
                        goto out;
-               atomic_inc(&rds_iw_allocation);
+               }
                INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
                rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
        }