}
EXPORT_SYMBOL(rdma_set_reuseaddr);
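+
+/*
+ * Walk all cm_ids on all devices and queue an RDMA_CM_EVENT_ADDR_CHANGE
+ * event on every id whose bound source address matches @addr.  The
+ * events are delivered from cma_wq by cma_ndev_work_handler().
+ */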
+int rdma_notify_addr_change(struct sockaddr *addr)
+{
+ struct cma_device *cma_dev;
+ struct rdma_id_private *id_priv;
+ struct sockaddr *src_addr;
+ struct cma_ndev_work *work;
+ int ret = 0;
+
+ mutex_lock(&lock);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+ src_addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
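+ /* Only IPv4 is handled; skip ids bound to another family or address. */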
+ if (addr->sa_family == AF_INET &&
+ addr->sa_family == src_addr->sa_family &&
+ ((struct sockaddr_in *) addr)->sin_addr.s_addr ==
+ ((struct sockaddr_in *) src_addr)->sin_addr.s_addr) {
+ work = kzalloc(sizeof *work, GFP_ATOMIC);
+ if (!work) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_WORK(&work->work, cma_ndev_work_handler);
+ work->id = id_priv;
+ work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
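+ /* The reference taken here is dropped by the work handler. */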
+ atomic_inc(&id_priv->refcount);
+ queue_work(cma_wq, &work->work);
+ }
+ }
+ }
+out:
+ mutex_unlock(&lock);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_notify_addr_change);
+
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
struct rdma_id_private *id_priv;
*/
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr);
+/**
+ * rdma_notify_addr_change - Notify the users of all rdma_cm_ids bound
+ * to the given IP address that the address has moved to a different
+ * device.
+ * @addr: the IP address that has moved.
+ *
+ * Queues an RDMA_CM_EVENT_ADDR_CHANGE event on each matching id.
+ * Returns 0 on success, or -ENOMEM if an event could not be queued.
+ */
+int rdma_notify_addr_change(struct sockaddr *addr);
+
/**
* rdma_resolve_addr - Resolve destination and optional source addresses
* from IP addresses to an RDMA address. If successful, the specified
kfree(work);
}
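+
+/*
+ * Delayed-work handler: build a sockaddr_in for the moved address and
+ * hand it to the RDMA CM so that bound ids see RDMA_CM_EVENT_ADDR_CHANGE.
+ */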
+static void rds_ib_notify_addr_change(struct work_struct *_work)
+{
+ struct rds_ib_addr_change_work *work =
+ container_of(_work, struct rds_ib_addr_change_work, work.work);
+ struct sockaddr_in sin;
+ int ret;
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = work->addr;
+ sin.sin_port = 0;
+
+ ret = rdma_notify_addr_change((struct sockaddr *)&sin);
+ if (ret)
+ printk(KERN_WARNING "RDS/IB: rdma_notify_addr_change failed (%d)\n", ret);
+
+ kfree(work);
+}
+
static int rds_ib_move_ip(char *from_dev,
char *to_dev,
u8 from_port,
struct rds_ib_connection *ic, *ic2;
struct rds_ib_device *rds_ibdev;
struct rds_ib_conn_drop_work *work;
+ struct rds_ib_addr_change_work *work_addrchange;
page = alloc_page(GFP_HIGHUSER);
if (!page) {
}
}
spin_unlock_bh(&rds_ibdev->spinlock);
+
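+ /*
+ * Notify the RDMA CM from worker context after a short delay
+ * (10 jiffies), giving the address move time to settle first.
+ */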
+ work_addrchange = kzalloc(sizeof *work_addrchange, GFP_ATOMIC);
+ if (!work_addrchange) {
+ printk(KERN_WARNING "RDS/IB: failed to allocate work\n");
+ goto out;
+ }
+ work_addrchange->addr = addr;
+ INIT_DELAYED_WORK(&work_addrchange->work, rds_ib_notify_addr_change);
+ queue_delayed_work(rds_wq, &work_addrchange->work, 10);
}
out:
struct rds_ib_connection *conn;
};
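+
+/* Deferred ADDR_CHANGE notification; addr is the IPv4 address that moved. */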
+struct rds_ib_addr_change_work {
+ struct delayed_work work;
+ __be32 addr;
+};
+
enum {
RDS_IB_MR_8K_POOL,
RDS_IB_MR_1M_POOL,
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
+ rdsdebug("ADDR_CHANGE event <%u.%u.%u.%u,%u.%u.%u.%u>\n",
+ NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr));
#if RDMA_RDS_APM_SUPPORTED
if (conn && !rds_ib_apm_enabled)
rds_conn_drop(conn);