/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
- cm_id = rdma_create_id(rds_rdma_cm_event_handler,
- NULL, RDMA_PS_TCP, IB_QPT_RC);
+ cm_id = rds_ib_rdma_create_id(rds_rdma_cm_event_handler,
+ NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return -EADDRNOTAVAIL;
addr, scope_id, ret,
cm_id->device ? cm_id->device->node_type : -1);
- rdma_destroy_id(cm_id);
+ rds_ib_rdma_destroy_id(cm_id);
return ret;
}
extern struct workqueue_struct *rds_ib_wq;
+/* Map @conn into the cm_id IDR and return the allocated slot id, cast
+ * to a pointer, for use as an opaque rdma_cm_id context.  Returns
+ * ERR_PTR() on allocation failure; callers must check with IS_ERR().
+ *
+ * Allocation starts at 1, never 0: slot id 0 would cast to a NULL
+ * context, which the listener paths (and the BUG_ON/NULL checks in the
+ * CM event handler) use to mean "no connection mapped".
+ */
+static inline struct rds_connection *rds_ib_map_conn(struct rds_connection *conn)
+{
+	int id;
+
+	mutex_lock(&cm_id_map_lock);
+	id = idr_alloc_cyclic(&cm_id_map, conn, 1, 0, GFP_KERNEL);
+	mutex_unlock(&cm_id_map_lock);
+
+	if (id < 0)
+		return ERR_PTR(id);
+
+	return (struct rds_connection *)(unsigned long)id;
+}
+
+/* rdma_create_id() wrapper that stores an IDR slot id, not the raw
+ * rds_connection pointer, as the cm_id context.
+ *
+ * @context may be NULL (the listener and bind-probe paths): a NULL
+ * context is passed through unmapped rather than wasting an IDR slot
+ * on a NULL entry.  On rdma_create_id() failure the slot allocated for
+ * a non-NULL @context is released again so it does not leak.
+ */
+static inline struct rdma_cm_id *rds_ib_rdma_create_id(rdma_cm_event_handler event_handler,
+						       void *context, enum rdma_port_space ps,
+						       enum ib_qp_type qp_type)
+{
+	struct rds_connection *mapped = NULL;
+	struct rdma_cm_id *cm_id;
+
+	if (context) {
+		mapped = rds_ib_map_conn(context);
+		if (IS_ERR(mapped))
+			return ERR_CAST(mapped);
+	}
+
+	cm_id = rdma_create_id(event_handler, mapped, ps, qp_type);
+	if (IS_ERR(cm_id) && mapped) {
+		/* Don't leak the IDR slot when cm_id creation fails. */
+		mutex_lock(&cm_id_map_lock);
+		idr_remove(&cm_id_map, (int)(unsigned long)mapped);
+		mutex_unlock(&cm_id_map_lock);
+	}
+
+	return cm_id;
+}
+
+/* Undo rds_ib_rdma_create_id(): drop the cm_id's IDR mapping (the
+ * context field holds the integer slot id, not a real pointer), then
+ * destroy the underlying cm_id.  Safe for listener cm_ids whose
+ * context was never mapped — removing an absent id is a no-op.
+ */
+static inline void rds_ib_rdma_destroy_id(struct rdma_cm_id *cm_id)
+{
+	mutex_lock(&cm_id_map_lock);
+	/* Cast matches rds_ib_get_conn(): context is a stored slot id. */
+	(void)idr_remove(&cm_id_map, (int)(unsigned long)cm_id->context);
+	mutex_unlock(&cm_id_map_lock);
+	rdma_destroy_id(cm_id);
+}
+
+/* Resolve a cm_id's context back to its rds_connection.
+ *
+ * The context field holds the integer IDR slot id installed by
+ * rds_ib_map_conn(), so the lookup goes through cm_id_map under
+ * cm_id_map_lock.  Returns NULL when no connection is mapped (e.g.
+ * the listening path, where the context was never populated).
+ *
+ * NOTE(review): the returned pointer is not reference-counted here —
+ * presumably the caller guarantees the conn outlives the cm_id; the
+ * mapping is only dropped in rds_ib_rdma_destroy_id().
+ */
+static inline struct rds_connection *rds_ib_get_conn(struct rdma_cm_id *cm_id)
+{
+	struct rds_connection *conn;
+
+	mutex_lock(&cm_id_map_lock);
+	conn = idr_find(&cm_id_map, (unsigned long)cm_id->context);
+	mutex_unlock(&cm_id_map_lock);
+
+	return conn;
+}
+
+/* Check whether @cm_id is the one owned by @ic: both the cm_id pointer
+ * itself and its stored context (the IDR slot id) must match.  A NULL
+ * @ic never matches.  Mismatches are traced via RDS_RTD_CM_EXT before
+ * returning false.
+ */
+static inline bool rds_ib_same_cm_id(struct rds_ib_connection *ic, struct rdma_cm_id *cm_id)
+{
+	if (!ic)
+		return false;
+
+	if (ic->i_cm_id != cm_id) {
+		rds_rtd_ptr(RDS_RTD_CM_EXT,
+			    "conn %p ic->cm_id %p NE cm_id %p\n",
+			    ic->conn, ic->i_cm_id, cm_id);
+		return false;
+	}
+
+	if (ic->i_cm_id->context != cm_id->context) {
+		rds_rtd_ptr(RDS_RTD_CM_EXT,
+			    "conn %p ic->cm_id %p cm_id %p ctx1 %p NE ctx2 %p\n",
+			    ic->conn, ic->i_cm_id, cm_id,
+			    ic->i_cm_id->context, cm_id->context);
+		return false;
+	}
+
+	return true;
+}
+
/*
* Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
* doesn't define it.
* in both of the cases below, the conn is half setup.
* we need to make sure the lower layers don't destroy it
*/
- if (ic && ic->i_cm_id == cm_id)
+ if (rds_ib_same_cm_id(ic, cm_id))
destroy = 0;
if (rds_conn_state(conn) == RDS_CONN_UP) {
rds_rtd(RDS_RTD_CM_EXT_P,
rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq),
NULL);
- BUG_ON(cm_id->context);
+ BUG_ON(rds_ib_get_conn(cm_id));
BUG_ON(ic->i_cm_id);
ic->i_cm_id = cm_id;
- cm_id->context = conn;
+ cm_id->context = rds_ib_map_conn(conn);
/* We got halfway through setting up the ib_connection, if we
* fail now, we have to take the long route out of this mess. */
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
{
- struct rds_connection *conn = cm_id->context;
+ struct rds_connection *conn = rds_ib_get_conn(cm_id);
struct rds_ib_connection *ic = conn->c_transport_data;
struct rdma_conn_param conn_param;
union rds_ib_conn_priv dp;
* the cm_id. We should certainly not do it as long as we still
* "own" the cm_id. */
if (ret) {
- if (ic->i_cm_id == cm_id)
+ if (rds_ib_same_cm_id(ic, cm_id))
ret = 0;
}
handler = rds6_rdma_cm_event_handler;
else
handler = rds_rdma_cm_event_handler;
- ic->i_cm_id = rdma_create_id(handler, conn, RDMA_PS_TCP, IB_QPT_RC);
+ ic->i_cm_id = rds_ib_rdma_create_id(handler, conn, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id);
ic->i_cm_id = NULL;
- rds_rtd(RDS_RTD_ERR, "rdma_create_id() failed: %d\n", ret);
+ rds_rtd(RDS_RTD_ERR, "rds_ib_rdma_create_id() failed: %d\n", ret);
goto out;
}
if (ret) {
rds_rtd(RDS_RTD_ERR, "addr resolve failed for cm id %p: %d\n",
ic->i_cm_id, ret);
- rdma_destroy_id(ic->i_cm_id);
+ rds_ib_rdma_destroy_id(ic->i_cm_id);
ic->i_cm_id = NULL;
}
if (ic->i_recvs)
rds_ib_recv_clear_ring(ic);
- rdma_destroy_id(ic->i_cm_id);
+ rds_ib_rdma_destroy_id(ic->i_cm_id);
/*
* Move connection back to the nodev list.
avail--;
if (avail < wanted) {
- struct rds_connection *conn = ic->i_cm_id->context;
+ struct rds_connection *conn = rds_ib_get_conn(ic->i_cm_id);
/* Oops, there aren't that many credits left! */
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
#define RDS_REJ_CONSUMER_DEFINED 28
+/* Protects cm_id_map.  Statically initialized (like the IDR below) so
+ * the lock is valid regardless of module init ordering, instead of
+ * relying on a runtime mutex_init() call. */
+DEFINE_MUTEX(cm_id_map_lock);
+DEFINE_IDR(cm_id_map);
/* Global IPv4 and IPv6 RDS RDMA listener cm_id */
static struct rdma_cm_id *rds_rdma_listen_id;
static struct rdma_cm_id *rds6_rdma_listen_id;
bool isv6)
{
/* this can be null in the listening path */
- struct rds_connection *conn = cm_id->context;
- struct rds_transport *trans;
+ struct rds_connection *conn;
+ struct rds_transport *trans = &rds_ib_transport;
int ret = 0;
int *err;
+ conn = rds_ib_get_conn(cm_id);
if (conn)
rds_rtd_ptr(RDS_RTD_CM,
"conn %p state %s cm_id %p <%pI6c,%pI6c,%d> handling event %u (%s) priv_dta_len %d\n",
"conn %p <%pI6c,%pI6c,%d> dropping connection after rdma_resolve_route failure %d\n",
conn, &conn->c_laddr, &conn->c_faddr, conn->c_tos, ret);
ibic = conn->c_transport_data;
- if (ibic && ibic->i_cm_id == cm_id)
+ if (rds_ib_same_cm_id(ibic, cm_id))
ibic->i_cm_id = NULL;
rds_conn_drop(conn, DR_IB_RESOLVE_ROUTE_FAIL);
}
struct rds_ib_connection *ibic;
ibic = conn->c_transport_data;
- if (ibic && ibic->i_cm_id == cm_id) {
+ if (rds_ib_same_cm_id(ibic, cm_id)) {
/* ibacm caches the path record without considering the tos/sl.
* It is considered a match if the <src,dest> matches the
* cache. In order to create qp with the correct sl/vl, RDS
struct rdma_cm_id *cm_id;
int ret;
- cm_id = rdma_create_id(handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
+ cm_id = rds_ib_rdma_create_id(handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
- printk(KERN_ERR "RDS/RDMA: failed to setup listener, rdma_create_id() returned %d\n",
+ printk(KERN_ERR "RDS/RDMA: failed to setup listener, rds_ib_rdma_create_id() returned %d\n",
ret);
return ret;
}
cm_id = NULL;
out:
if (cm_id)
- rdma_destroy_id(cm_id);
+ rds_ib_rdma_destroy_id(cm_id);
return ret;
}
{
if (rds_rdma_listen_id) {
rdsdebug("cm %p\n", rds_rdma_listen_id);
- rdma_destroy_id(rds_rdma_listen_id);
+ rds_ib_rdma_destroy_id(rds_rdma_listen_id);
rds_rdma_listen_id = NULL;
}
if (rds6_rdma_listen_id) {
rdsdebug("cm %p\n", rds6_rdma_listen_id);
- rdma_destroy_id(rds6_rdma_listen_id);
+ rds_ib_rdma_destroy_id(rds6_rdma_listen_id);
rds6_rdma_listen_id = NULL;
}
}
{
int ret;
+ mutex_init(&cm_id_map_lock);
+
ret = rds_ib_init();
if (ret)
goto out;
#define RDS_RDMA_RESOLVE_TIMEOUT_MS RDS_RECONNECT_RETRY_MS
+extern struct mutex cm_id_map_lock;
+extern struct idr cm_id_map;
+
/* Per IB specification 7.7.3, service level is a 4-bit field. */
#define TOS_TO_SL(tos) ((tos) & 0xF)