__srpt_close_all_ch(sport);
 }
 
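+/*
+ * Drop a reference on @sport. If the reference count reaches zero, wake up
+ * srpt_release_sport(), which waits on @sport->freed_channels until all
+ * channels associated with the port have been freed.
+ */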
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+       if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+               complete(sport->freed_channels);
+}
+
 static void srpt_free_ch(struct kref *kref)
 {
        struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
 
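+       /* Last reference to @ch is gone; drop the reference on its port. */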
+       srpt_drop_sport_ref(ch->sport);
        kfree_rcu(ch, rcu);
 }
 
 
        kmem_cache_destroy(ch->req_buf_cache);
 
-       wake_up(&sport->ch_releaseQ);
-
        kref_put(&ch->kref, srpt_free_ch);
 }
 
                goto destroy_ib;
        }
 
+       /*
+        * Once a session has been created, destruction of srpt_rdma_ch objects
+        * will decrement sport->refcount. Hence increment sport->refcount now.
+        */
+       atomic_inc(&sport->refcount);
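+       /* Dropped by srpt_drop_sport_ref() once this channel is freed. */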
+
        mutex_lock(&sport->mutex);
 
        if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
        srpt_refresh_port(sport);
 }
 
-static bool srpt_ch_list_empty(struct srpt_port *sport)
-{
-       struct srpt_nexus *nexus;
-       bool res = true;
-
-       rcu_read_lock();
-       list_for_each_entry(nexus, &sport->nexus_list, entry)
-               if (!list_empty(&nexus->ch_list))
-                       res = false;
-       rcu_read_unlock();
-
-       return res;
-}
-
 /**
  * srpt_release_sport - disable login and wait for associated channels
  * @sport: SRPT HCA port.
  */
 static int srpt_release_sport(struct srpt_port *sport)
 {
+       DECLARE_COMPLETION_ONSTACK(c);
        struct srpt_nexus *nexus, *next_n;
        struct srpt_rdma_ch *ch;
 
        WARN_ON_ONCE(irqs_disabled());
 
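+       /*
+        * Let srpt_drop_sport_ref() signal @c once the last channel
+        * reference has been dropped.
+        */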
+       sport->freed_channels = &c;
+
        mutex_lock(&sport->mutex);
        srpt_set_enabled(sport, false);
        mutex_unlock(&sport->mutex);
 
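+       /*
+        * Wait until the sport refcount drops to zero. While waiting, report
+        * the number of sessions still registered every five seconds.
+        */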
-       while (wait_event_timeout(sport->ch_releaseQ,
-                                 srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
-               pr_info("%s_%d: waiting for session unregistration ...\n",
-                       dev_name(&sport->sdev->device->dev), sport->port);
+       while (atomic_read(&sport->refcount) > 0 &&
+              wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+               pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+                       dev_name(&sport->sdev->device->dev), sport->port,
+                       atomic_read(&sport->refcount));
                rcu_read_lock();
                list_for_each_entry(nexus, &sport->nexus_list, entry) {
                        list_for_each_entry(ch, &nexus->ch_list, list) {
        for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
                sport = &sdev->port[i - 1];
                INIT_LIST_HEAD(&sport->nexus_list);
-               init_waitqueue_head(&sport->ch_releaseQ);
                mutex_init(&sport->mutex);
                sport->sdev = sdev;
                sport->port = i;
 
  * @port_gid_tpg:  TPG associated with target port GID.
  * @port_gid_wwn:  WWN associated with target port GID.
  * @port_attrib:   Port attributes that can be accessed through configfs.
- * @ch_releaseQ:   Enables waiting for removal from nexus_list.
+ * @refcount:      Number of objects associated with this port.
+ * @freed_channels: Completion that will be signaled once @refcount becomes 0.
  * @mutex:        Protects nexus_list.
  * @nexus_list:           Nexus list. See also srpt_nexus.entry.
  */
        struct se_portal_group  port_gid_tpg;
        struct se_wwn           port_gid_wwn;
        struct srpt_port_attrib port_attrib;
-       wait_queue_head_t       ch_releaseQ;
+       atomic_t                refcount;
+       struct completion       *freed_channels;
        struct mutex            mutex;
        struct list_head        nexus_list;
 };