www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sdp: device_removal_lock should not be a spinlock because module removal takes a...
author    Eldad Zinger <eldadz@mellanox.co.il>
          Sun, 4 Jul 2010 13:30:43 +0000 (16:30 +0300)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
          Tue, 6 Oct 2015 12:05:08 +0000 (05:05 -0700)
To prevent a situation where many CPUs sit stuck spinning on the read lock
while waiting for the device-removal write lock to be released, I replaced
the rw-spinlock with an rw-semaphore. That way the blocked processes go to
sleep instead of busy-waiting, and no longer hold up other processes.

Signed-off-by: Eldad Zinger <eldadz@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_main.c
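
For readers unfamiliar with the two primitives, the mapping the patch applies
at every call site is sketched below. This is a minimal illustration, not the
actual SDP code: example_reader() and example_writer() are hypothetical
stand-ins for the connection-setup and device-removal paths touched by the
diff.

	#include <linux/rwsem.h>

	/*
	 * Before the patch (readers spin while a writer holds the lock):
	 *
	 *	DEFINE_RWLOCK(device_removal_lock);
	 *	read_lock()/read_unlock(), write_lock()/write_unlock()
	 *
	 * After the patch (blocked tasks sleep instead of burning CPUs):
	 */
	DECLARE_RWSEM(device_removal_lock);

	/* Hypothetical reader; any number may hold the semaphore at once. */
	static void example_reader(void)
	{
		down_read(&device_removal_lock);  /* sleeps if a writer holds it */
		/* ... use per-device resources that removal must not tear down ... */
		up_read(&device_removal_lock);
	}

	/* Hypothetical writer; device removal waits for every reader to leave. */
	static void example_writer(void)
	{
		down_write(&device_removal_lock); /* excludes readers and writers */
		/* ... safely tear down per-device resources ... */
		up_write(&device_removal_lock);
	}

The trade-off is that down_read()/down_write() may sleep, so they are only
legal in process context; the paths changed below already run in contexts
that may sleep (note the nearby flush_scheduled_work() and release_sock()
calls in the diff), which is why the swap is safe here.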

index fd98ca905024b929dfd6b82c816bea09c5c00868..0f6babb033bda2c01929693d00788dc7b279dac0 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/workqueue.h>
 #include <linux/wait.h>
+#include <linux/rwsem.h>
 #include <net/inet_sock.h>
 #include <net/tcp.h> /* For urgent data flags */
 #include <rdma/ib_verbs.h>
@@ -800,7 +801,7 @@ void sdp_reset_sk(struct sock *sk, int rc);
 void sdp_reset(struct sock *sk);
 int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed);
 void sdp_skb_entail(struct sock *sk, struct sk_buff *skb);
-extern rwlock_t device_removal_lock;
+extern struct rw_semaphore device_removal_lock;
 
 /* sdp_proc.c */
 int __init sdp_proc_init(void);
index a94057978318081144073ad780cf5fc7a5658536..35532ad00c8c8aa742308287390ac7074cf05727 100644 (file)
@@ -175,18 +175,18 @@ static int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id,
        bh_unlock_sock(child);
        __sock_put(child, SOCK_REF_CLONE);
 
-       read_lock(&device_removal_lock);
+       down_read(&device_removal_lock);
 
        rc = sdp_init_qp(child, id);
        if (rc) {
-               read_unlock(&device_removal_lock);
+               up_read(&device_removal_lock);
                sdp_sk(child)->destructed_already = 1;
                sk_free(child);
                return rc;
        }
 
        sdp_add_sock(sdp_sk(child));
-       read_unlock(&device_removal_lock);
+       up_read(&device_removal_lock);
 
        sdp_sk(child)->max_bufs = ntohs(h->bsdh.bufs);
        atomic_set(&sdp_sk(child)->tx_ring.credits, sdp_sk(child)->max_bufs);
index 9acff248984ebdbdd3d53440f1b9b052eb846dc1..7c8494c3deb456966dcbba3f11939a43225e335f 100644 (file)
@@ -119,7 +119,7 @@ struct workqueue_struct *rx_comp_wq;
 struct list_head sock_list;
 spinlock_t sock_list_lock;
 
-DEFINE_RWLOCK(device_removal_lock);
+DECLARE_RWSEM(device_removal_lock);
 
 static inline unsigned int sdp_keepalive_time_when(const struct sdp_sock *ssk)
 {
@@ -521,10 +521,10 @@ static void sdp_destruct(struct sock *sk)
 
        ssk->destructed_already = 1;
 
-       read_lock(&device_removal_lock);
+       down_read(&device_removal_lock);
        sdp_remove_sock(ssk);
        sdp_destroy_resources(sk);
-       read_unlock(&device_removal_lock);
+       up_read(&device_removal_lock);
 
        flush_scheduled_work();
 
@@ -2810,7 +2810,7 @@ static void sdp_remove_device(struct ib_device *device)
 
        /* destroy_ids: */
 do_next:
-       write_lock(&device_removal_lock);
+       down_write(&device_removal_lock);
 
        spin_lock_irq(&sock_list_lock);
        list_for_each_entry(ssk, &sock_list, sock_list) {
@@ -2825,7 +2825,7 @@ do_next:
                        ssk->id_destroyed_already = 1;
 
                        release_sock(sk);
-                       write_unlock(&device_removal_lock);
+                       up_write(&device_removal_lock);
 
                        if (id)
                                rdma_destroy_id(id);
@@ -2859,7 +2859,7 @@ kill_socks:
 
        spin_unlock_irq(&sock_list_lock);
 
-       write_unlock(&device_removal_lock);
+       up_write(&device_removal_lock);
 
        if (!sdp_dev)
                return;