static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
+static struct workqueue_struct *cma_free_wq;
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
struct completion comp;
atomic_t refcount;
struct mutex handler_mutex;
+ struct work_struct work; /* deferred free, run on cma_free_wq */
int backlog;
int timeout_ms;
}
}
}
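+/*
+ * Final teardown for an id, run on cma_free_wq: wait for the last
+ * reference to drop, then free the id and its route.
+ */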
+static void __rdma_free(struct work_struct *work)
+{
+ struct rdma_id_private *id_priv;
+ id_priv = container_of(work, struct rdma_id_private, work);
+
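+ /* comp is completed by the final cma_deref_id(). */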
+ wait_for_completion(&id_priv->comp);
+
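+ /*
+ * Internal (per-device listen) ids hold a reference on the parent
+ * listen id stashed in id.context; drop it here.
+ */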
+ if (id_priv->internal_id)
+ cma_deref_id(id_priv->id.context);
+
+ kfree(id_priv->id.route.path_rec);
+ kfree(id_priv);
+}
void rdma_destroy_id(struct rdma_cm_id *id)
{
cma_release_port(id_priv);
cma_deref_id(id_priv);
- wait_for_completion(&id_priv->comp);
-
- if (id_priv->internal_id)
- cma_deref_id(id_priv->id.context);
-
- kfree(id_priv->id.route.path_rec);
- kfree(id_priv);
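+ /*
+ * Defer the potentially long wait for the last reference and the
+ * final kfree() to cma_free_wq rather than blocking the caller.
+ */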
+ INIT_WORK(&id_priv->work, __rdma_free);
+ queue_work(cma_free_wq, &id_priv->work);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int __init cma_init(void)
{
- int ret;
+ int ret = -ENOMEM;
cma_wq = create_singlethread_workqueue("rdma_cm");
if (!cma_wq)
return -ENOMEM;
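+ /*
+ * __rdma_free() sleeps in wait_for_completion(), so give deferred
+ * frees their own single-threaded queue instead of sharing cma_wq.
+ */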
+ cma_free_wq = create_singlethread_workqueue("rdma_cm_fr");
+ if (!cma_free_wq)
+ goto err1;
+
ib_sa_register_client(&sa_client);
rdma_addr_register_client(&addr_client);
register_netdevice_notifier(&cma_nb);
unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
+
+ destroy_workqueue(cma_free_wq);
+err1:
destroy_workqueue(cma_wq);
return ret;
}
unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
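+ /*
+ * Work running on cma_wq can still queue deferred frees onto
+ * cma_free_wq via rdma_destroy_id(), so drain cma_wq first, then
+ * flush and destroy cma_free_wq.
+ */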
destroy_workqueue(cma_wq);
+ flush_workqueue(cma_free_wq);
+ destroy_workqueue(cma_free_wq);
idr_destroy(&tcp_ps);
idr_destroy(&udp_ps);