#include "ib.h"
static char *rds_ib_event_type_strings[] = {
-#define RDS_IB_EVENT_STRING(foo) [IB_EVENT_##foo] = __stringify(foo)
+#define RDS_IB_EVENT_STRING(foo) [IB_EVENT_##foo] = __stringify(foo)
RDS_IB_EVENT_STRING(CQ_ERR),
RDS_IB_EVENT_STRING(QP_FATAL),
RDS_IB_EVENT_STRING(QP_REQ_ERR),
}
}
- if (conn->c_version < RDS_PROTOCOL(3,2)) {
- if (conn->c_version == RDS_PROTOCOL(3,1)) {
- if (conn->c_tos) {
- printk(KERN_NOTICE "RDS: Connection to"
- " %u.%u.%u.%u version %u.%u Tos %d"
- " failed, not supporting QoS\n",
- NIPQUAD(conn->c_faddr),
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- conn->c_tos);
- rds_conn_drop(conn);
- return;
- }
- } else {
- /*
- * BUG: destroying connection here can deadlock with
- * the CM event handler on the c_cm_lock.
- */
- printk(KERN_NOTICE "RDS/IB: Connection to"
- " %u.%u.%u.%u version %u.%u failed,"
- " no longer supported\n",
- NIPQUAD(conn->c_faddr),
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version));
- rds_conn_destroy(conn);
- return;
- }
- }
-
- printk(KERN_NOTICE
- "RDS/IB: connected to %u.%u.%u.%u version %u.%u%s Tos %d\n",
- NIPQUAD(conn->c_faddr),
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- ic->i_flowctl ? ", flow control" : "",
- conn->c_tos);
-
- ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+ if (conn->c_version < RDS_PROTOCOL(3, 2)) {
+ if (conn->c_version == RDS_PROTOCOL(3, 1)) {
+ if (conn->c_tos) {
+ printk(KERN_NOTICE "RDS: Connection to"
+ " %u.%u.%u.%u version %u.%u Tos %d"
+ " failed, not supporting QoS\n",
+ NIPQUAD(conn->c_faddr),
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version),
+ conn->c_tos);
+ rds_conn_drop(conn);
+ return;
+ }
+ } else {
+ /*
+ * BUG: destroying connection here can deadlock with
+ * the CM event handler on the c_cm_lock.
+ */
+ printk(KERN_NOTICE "RDS/IB: Connection to"
+ " %u.%u.%u.%u version %u.%u failed,"
+ " no longer supported\n",
+ NIPQUAD(conn->c_faddr),
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version));
+ rds_conn_destroy(conn);
+ return;
+ }
+ }
+
+ printk(KERN_NOTICE
+ "RDS/IB: connected to %u.%u.%u.%u version %u.%u%s Tos %d\n",
+ NIPQUAD(conn->c_faddr),
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version),
+ ic->i_flowctl ? ", flow control" : "",
+ conn->c_tos);
+
+ ic->i_sl = ic->i_cm_id->route.path_rec->sl;
/*
* Init rings and fill recv. this needs to wait until protocol negotiation
*/
rds_ib_send_init_ring(ic);
- if (!ic->conn->c_tos)
- rds_ib_recv_init_ring(ic);
+ if (!ic->conn->c_tos)
+ rds_ib_recv_init_ring(ic);
/* Post receive buffers - as a side effect, this will update
* the posted credit count. */
- if (!ic->conn->c_tos) {
- rds_ib_recv_refill(conn, 1, GFP_KERNEL);
- }
+ if (!ic->conn->c_tos)
+ rds_ib_recv_refill(conn, 1, GFP_KERNEL);
/* Tune RNR behavior */
rds_ib_tune_rnr(ic, &qp_attr);
if (rds_conn_up(conn))
rds_ib_attempt_ack(ic);
- if (conn->c_tos) {
- if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
- rds_ib_srq_refill_wr) &&
- !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
- queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w,0);
-
- }
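+ /*
+  * QoS connections receive on the shared device SRQ. If the number
+  * of posted buffers has dropped below the refill watermark, kick
+  * the refill worker; s_refill_gate keeps a single refill in flight.
+  */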
+ if (conn->c_tos &&
+ (atomic_read(&rds_ibdev->srq->s_num_posted) <
+ rds_ib_srq_refill_wr) &&
+ !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
+ queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w, 0);
}
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
case IB_EVENT_COMM_EST:
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
- case IB_EVENT_QP_LAST_WQE_REACHED:
- complete(&ic->i_last_wqe_complete);
- break;
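+ /*
+  * Shutdown waits on this completion before draining the RX CQ,
+  * so signal it once the QP has flushed its last WQE.
+  */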
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ complete(&ic->i_last_wqe_complete);
+ break;
default:
rdsdebug("Fatal QP Event %u (%s) "
"- connection %pI4->%pI4, reconnecting\n",
goto out;
}
- if (ic->conn->c_tos)
- ic->i_rcq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
- rds_ib_cq_event_handler, conn,
- rds_ib_srq_max_wr - 1,
- IB_CQ_VECTOR_LEAST_ATTACHED);
- else
- ic->i_rcq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
- rds_ib_cq_event_handler, conn,
- ic->i_recv_ring.w_nr,
- IB_CQ_VECTOR_LEAST_ATTACHED);
+ if (ic->conn->c_tos)
+ ic->i_rcq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
+ rds_ib_cq_event_handler, conn,
+ rds_ib_srq_max_wr - 1,
+ IB_CQ_VECTOR_LEAST_ATTACHED);
+ else
+ ic->i_rcq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
+ rds_ib_cq_event_handler, conn,
+ ic->i_recv_ring.w_nr,
+ IB_CQ_VECTOR_LEAST_ATTACHED);
if (IS_ERR(ic->i_rcq)) {
ret = PTR_ERR(ic->i_rcq);
ic->i_rcq = NULL;
attr.send_cq = ic->i_scq;
attr.recv_cq = ic->i_rcq;
- if (ic->conn->c_tos) {
- attr.cap.max_recv_wr = 0;
- attr.srq = rds_ibdev->srq->s_srq;
- }
+ if (ic->conn->c_tos) {
+ attr.cap.max_recv_wr = 0;
+ attr.srq = rds_ibdev->srq->s_srq;
+ }
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
goto out;
}
- if (!ic->conn->c_tos) {
- ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
- ic->i_recv_ring.w_nr *
- sizeof(struct rds_header),
- &ic->i_recv_hdrs_dma, GFP_KERNEL);
- if (!ic->i_recv_hdrs) {
- ret = -ENOMEM;
- rdsdebug("ib_dma_alloc_coherent recv failed\n");
- goto out;
- }
- }
+ if (!ic->conn->c_tos) {
+ ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
+ ic->i_recv_ring.w_nr *
+ sizeof(struct rds_header),
+ &ic->i_recv_hdrs_dma, GFP_KERNEL);
+ if (!ic->i_recv_hdrs) {
+ ret = -ENOMEM;
+ rdsdebug("ib_dma_alloc_coherent recv failed\n");
+ goto out;
+ }
+ }
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
&ic->i_ack_dma, GFP_KERNEL);
}
memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
- if (!ic->conn->c_tos) {
- ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr *
- sizeof(struct rds_ib_recv_work));
- if (!ic->i_recvs) {
- ret = -ENOMEM;
- rdsdebug("recv allocation failed\n");
- goto out;
- }
- memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
- }
+ if (!ic->conn->c_tos) {
+ ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr *
+ sizeof(struct rds_ib_recv_work));
+ if (!ic->i_recvs) {
+ ret = -ENOMEM;
+ rdsdebug("recv allocation failed\n");
+ goto out;
+ }
+ memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
+ }
rds_ib_recv_init_ack(ic);
}
/* Even if len is crap *now* I still want to check it. -ASG */
- if (event->param.conn.private_data_len < sizeof (*dp)
+ if (event->param.conn.private_data_len < sizeof(*dp)
|| dp->dp_protocol_major == 0)
return RDS_PROTOCOL_3_0;
if (now > conn->c_connection_start &&
now - conn->c_connection_start > 15) {
printk(KERN_CRIT "rds connection racing for 15s, forcing reset "
- "connection %u.%u.%u.%u->%u.%u.%u.%u\n",
- NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr));
+ "connection %u.%u.%u.%u->%u.%u.%u.%u\n",
+ NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr));
rds_conn_drop(conn);
rds_ib_stats_inc(s_ib_listen_closed_stale);
} else {
*/
rdsdebug("failed to disconnect, cm: %p err %d\n",
ic->i_cm_id, err);
- } else if (ic->conn->c_tos && ic->rds_ibdev) {
- /*
- * wait for the last wqe to complete, then schedule
- * the recv tasklet to drain the RX CQ.
- */
- wait_for_completion(&ic->i_last_wqe_complete);
- tasklet_schedule(&ic->i_rtasklet);
+ } else if (ic->conn->c_tos && ic->rds_ibdev) {
+ /*
+  * wait for the last wqe to complete, then schedule
+  * the recv tasklet to drain the RX CQ.
+  */
+ wait_for_completion(&ic->i_last_wqe_complete);
+ tasklet_schedule(&ic->i_rtasklet);
}
/* quiesce tx and rx completion before tearing down */
vfree(ic->i_sends);
ic->i_sends = NULL;
- if (!ic->conn->c_tos)
- vfree(ic->i_recvs);
+ if (!ic->conn->c_tos)
+ vfree(ic->i_recvs);
ic->i_recvs = NULL;
-
+
INIT_COMPLETION(ic->i_last_wqe_complete);
}
ic->conn = conn;
conn->c_transport_data = ic;
-
+
init_completion(&ic->i_last_wqe_complete);
spin_lock_irqsave(&ib_nodev_conns_lock, flags);
* A race with shutdown() or connect() would cause problems
* (since rds_ibdev would change) but that should never happen.
*/
lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;
spin_lock_irq(lock_ptr);
cache->percpu = alloc_percpu(struct rds_ib_cache_head);
if (!cache->percpu)
- return -ENOMEM;
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
head = per_cpu_ptr(cache->percpu, cpu);
ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
} else {
ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
- if (!ibinc) {
+ if (!ibinc)
return NULL;
- }
rds_ib_stats_inc(s_ib_rx_total_incs);
}
INIT_LIST_HEAD(&ibinc->ii_frags);
return ibinc;
}
-static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
+static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
gfp_t slab_mask, gfp_t page_mask)
{
struct rds_page_frag *frag;
if (!avail_allocs) {
if (test_and_clear_bit(0, &rds_ib_allocation_warn)) {
- printk(KERN_NOTICE "RDS/IB: WARNING - "
- "recv memory exceeded max_recv_allocation %d\n",
- atomic_read(&rds_ib_allocation));
- }
- rds_ib_stats_inc(s_ib_rx_alloc_limit);
- return NULL;
+ printk(KERN_NOTICE "RDS/IB: WARNING - "
+ "recv memory exceeded max_recv_allocation %d\n",
+ atomic_read(&rds_ib_allocation));
+ }
+ rds_ib_stats_inc(s_ib_rx_alloc_limit);
+ return NULL;
}
sg_init_table(&frag->f_sg, 1);
}
static void rds_ib_srq_clear_one(struct rds_ib_srq *srq,
- struct rds_ib_connection *ic,
- struct rds_ib_recv_work *recv)
+ struct rds_ib_connection *ic,
+ struct rds_ib_recv_work *recv)
{
- if (recv->r_ibinc) {
- rds_inc_put(&recv->r_ibinc->ii_inc);
- recv->r_ibinc = NULL;
- }
- if (recv->r_frag) {
- ib_dma_unmap_sg(srq->rds_ibdev->dev, &recv->r_frag->f_sg,
- 1, DMA_FROM_DEVICE);
- rds_ib_frag_free(ic, recv->r_frag);
- recv->r_frag = NULL;
- recv->r_ic = ic;
- recv->r_posted = 0;
- }
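+ /*
+  * Release a ring slot: drop any half-assembled inc and unmap and
+  * free the data fragment so the slot can be refilled later.
+  */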
+ if (recv->r_ibinc) {
+ rds_inc_put(&recv->r_ibinc->ii_inc);
+ recv->r_ibinc = NULL;
+ }
+ if (recv->r_frag) {
+ ib_dma_unmap_sg(srq->rds_ibdev->dev, &recv->r_frag->f_sg,
+ 1, DMA_FROM_DEVICE);
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
+ recv->r_ic = ic;
+ recv->r_posted = 0;
+ }
}
static int rds_ib_srq_refill_one(struct rds_ib_srq *srq,
- struct rds_ib_connection *ic,
- struct rds_ib_recv_work *recv, gfp_t gfp)
+ struct rds_ib_connection *ic,
+ struct rds_ib_recv_work *recv, gfp_t gfp)
{
- struct ib_sge *sge;
- int ret = -ENOMEM;
- gfp_t slab_mask = GFP_NOWAIT;
- gfp_t page_mask = GFP_NOWAIT;
-
- if (gfp & __GFP_WAIT) {
- slab_mask = GFP_KERNEL;
- page_mask = GFP_HIGHUSER;
- }
-
- if (!ic->i_cache_incs.ready)
- rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
- if (!ic->i_cache_frags.ready)
- rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
-
- /*
- * ibinc was taken from recv if recv contained the start of a message.
- * recvs that were continuations will still have this allocated.
- */
- if (!recv->r_ibinc) {
- recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
- if (!recv->r_ibinc)
- goto out;
- }
-
- WARN_ON_ONCE(recv->r_frag); /* leak! */
- recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
- if (!recv->r_frag)
- goto out;
-
- ret = ib_dma_map_sg(srq->rds_ibdev->dev, &recv->r_frag->f_sg,
- 1, DMA_FROM_DEVICE);
-
- WARN_ON(ret != 1);
-
- sge = &recv->r_sge[0];
-
- sge->addr = srq->s_recv_hdrs_dma +
- (recv - srq->s_recvs) *
- sizeof(struct rds_header);
-
- sge->length = sizeof(struct rds_header);
-
- sge = &recv->r_sge[1];
- sge->addr = sg_dma_address(&recv->r_frag->f_sg);
- sge->length = sg_dma_len(&recv->r_frag->f_sg);
-
- ret = 0;
+ struct ib_sge *sge;
+ int ret = -ENOMEM;
+ gfp_t slab_mask = GFP_NOWAIT;
+ gfp_t page_mask = GFP_NOWAIT;
+
+ if (gfp & __GFP_WAIT) {
+ slab_mask = GFP_KERNEL;
+ page_mask = GFP_HIGHUSER;
+ }
+
+ if (!ic->i_cache_incs.ready)
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
+ if (!ic->i_cache_frags.ready)
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
+
+ /*
+ * ibinc was taken from recv if recv contained the start of a message.
+ * recvs that were continuations will still have this allocated.
+ */
+ if (!recv->r_ibinc) {
+ recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
+ if (!recv->r_ibinc)
+ goto out;
+ }
+
+ WARN_ON_ONCE(recv->r_frag); /* leak! */
+ recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
+ if (!recv->r_frag)
+ goto out;
+
+ ret = ib_dma_map_sg(srq->rds_ibdev->dev, &recv->r_frag->f_sg,
+ 1, DMA_FROM_DEVICE);
+
+ WARN_ON(ret != 1);
+
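+ /* SGE 0 covers this slot's slice of the shared header array; SGE 1 covers the data fragment. */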
+ sge = &recv->r_sge[0];
+
+ sge->addr = srq->s_recv_hdrs_dma +
+ (recv - srq->s_recvs) *
+ sizeof(struct rds_header);
+
+ sge->length = sizeof(struct rds_header);
+
+ sge = &recv->r_sge[1];
+ sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+ sge->length = sg_dma_len(&recv->r_frag->f_sg);
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int rds_ib_srq_prefill_one(struct rds_ib_device *rds_ibdev,
- struct rds_ib_recv_work *recv, int prefill)
+ struct rds_ib_recv_work *recv, int prefill)
{
- struct ib_sge *sge;
- int ret = -ENOMEM;
- gfp_t slab_mask = GFP_NOWAIT;
- gfp_t page_mask = GFP_NOWAIT;
-
- if (prefill) {
- slab_mask = GFP_KERNEL;
- page_mask = GFP_HIGHUSER;
- }
-
- if (!recv->r_ibinc) {
- recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
- if (!recv->r_ibinc)
- goto out;
- rds_ib_stats_inc(s_ib_rx_total_incs);
- INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
- }
-
- WARN_ON_ONCE(recv->r_frag); /* leak! */
- recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
- if (!recv->r_frag)
- goto out;
- sg_init_table(&recv->r_frag->f_sg, 1);
- ret = rds_page_remainder_alloc(&recv->r_frag->f_sg,
- RDS_FRAG_SIZE, page_mask);
- if (ret) {
- kmem_cache_free(rds_ib_frag_slab, recv->r_frag);
- goto out;
- }
- rds_ib_stats_inc(s_ib_rx_total_frags);
- INIT_LIST_HEAD(&recv->r_frag->f_item);
-
- ret = ib_dma_map_sg(rds_ibdev->dev, &recv->r_frag->f_sg,
- 1, DMA_FROM_DEVICE);
- WARN_ON(ret != 1);
-
- sge = &recv->r_sge[0];
- sge->addr = rds_ibdev->srq->s_recv_hdrs_dma +
- (recv - rds_ibdev->srq->s_recvs) *
- sizeof(struct rds_header);
- sge->length = sizeof(struct rds_header);
- sge->lkey = rds_ibdev->mr->lkey;
-
- sge = &recv->r_sge[1];
- sge->addr = sg_dma_address(&recv->r_frag->f_sg);
- sge->length = sg_dma_len(&recv->r_frag->f_sg);
- sge->lkey = rds_ibdev->mr->lkey;
-
- ret = 0;
+ struct ib_sge *sge;
+ int ret = -ENOMEM;
+ gfp_t slab_mask = GFP_NOWAIT;
+ gfp_t page_mask = GFP_NOWAIT;
+
+ if (prefill) {
+ slab_mask = GFP_KERNEL;
+ page_mask = GFP_HIGHUSER;
+ }
+
+ if (!recv->r_ibinc) {
+ recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
+ if (!recv->r_ibinc)
+ goto out;
+ rds_ib_stats_inc(s_ib_rx_total_incs);
+ INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
+ }
+
+ WARN_ON_ONCE(recv->r_frag); /* leak! */
+ recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
+ if (!recv->r_frag)
+ goto out;
+ sg_init_table(&recv->r_frag->f_sg, 1);
+ ret = rds_page_remainder_alloc(&recv->r_frag->f_sg,
+ RDS_FRAG_SIZE, page_mask);
+ if (ret) {
+ kmem_cache_free(rds_ib_frag_slab, recv->r_frag);
+ goto out;
+ }
+
+ rds_ib_stats_inc(s_ib_rx_total_frags);
+ INIT_LIST_HEAD(&recv->r_frag->f_item);
+
+ ret = ib_dma_map_sg(rds_ibdev->dev, &recv->r_frag->f_sg,
+ 1, DMA_FROM_DEVICE);
+ WARN_ON(ret != 1);
+
+ sge = &recv->r_sge[0];
+ sge->addr = rds_ibdev->srq->s_recv_hdrs_dma +
+ (recv - rds_ibdev->srq->s_recvs) *
+ sizeof(struct rds_header);
+ sge->length = sizeof(struct rds_header);
+ sge->lkey = rds_ibdev->mr->lkey;
+
+ sge = &recv->r_sge[1];
+ sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+ sge->length = sg_dma_len(&recv->r_frag->f_sg);
+ sge->lkey = rds_ibdev->mr->lkey;
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
unsigned int posted = 0;
int ret = 0;
int can_wait = gfp & __GFP_WAIT;
- int must_wake = 0;
+ int must_wake = 0;
int ring_low = 0;
int ring_empty = 0;
u32 pos;
recv = &ic->i_recvs[pos];
ret = rds_ib_recv_refill_one(conn, recv, gfp);
- if (ret) {
+ if (ret)
break;
- }
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
}
void rds_ib_srq_process_recv(struct rds_connection *conn,
- struct rds_ib_recv_work *recv, u32 data_len,
- struct rds_ib_ack_state *state)
+ struct rds_ib_recv_work *recv, u32 data_len,
+ struct rds_ib_ack_state *state)
{
- struct rds_ib_connection *ic = conn->c_transport_data;
- struct rds_ib_incoming *ibinc = ic->i_ibinc;
- struct rds_header *ihdr, *hdr;
-
- if (data_len < sizeof(struct rds_header)) {
- printk(KERN_WARNING "RDS: from %pI4 didn't inclue a "
- "header, disconnecting and "
- "reconnecting\n",
- &conn->c_faddr);
- rds_ib_frag_free(ic, recv->r_frag);
- recv->r_frag = NULL;
- return;
- }
- data_len -= sizeof(struct rds_header);
-
- ihdr = &ic->rds_ibdev->srq->s_recv_hdrs[recv->r_wr.wr_id];
-
- /* Validate the checksum. */
- if (!rds_message_verify_checksum(ihdr)) {
- printk(KERN_WARNING "RDS: from %pI4 has corrupted header - "
- "forcing a reconnect\n",
- &conn->c_faddr);
- rds_stats_inc(s_recv_drop_bad_checksum);
- rds_ib_frag_free(ic, recv->r_frag);
- recv->r_frag = NULL;
- return;
- }
-
- /* Process the ACK sequence which comes with every packet */
- state->ack_recv = be64_to_cpu(ihdr->h_ack);
- state->ack_recv_valid = 1;
-
- if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
- rds_ib_stats_inc(s_ib_ack_received);
- rds_ib_frag_free(ic, recv->r_frag);
- recv->r_frag = NULL;
- return;
- }
-
- if (!ibinc) {
- ibinc = recv->r_ibinc;
- rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
- recv->r_ibinc = NULL;
- ic->i_ibinc = ibinc;
- hdr = &ibinc->ii_inc.i_hdr;
- memcpy(hdr, ihdr, sizeof(*hdr));
- ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
- } else {
- hdr = &ibinc->ii_inc.i_hdr;
- if (hdr->h_sequence != ihdr->h_sequence
- || hdr->h_len != ihdr->h_len
- || hdr->h_sport != ihdr->h_sport
- || hdr->h_dport != ihdr->h_dport) {
- printk(KERN_WARNING "RDS: fragment header mismatch; "
- "forcing reconnect\n");
- rds_ib_frag_free(ic, recv->r_frag);
- recv->r_frag = NULL;
- return;
- }
- }
-
- list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
-
- recv->r_frag = NULL;
-
- if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
- ic->i_recv_data_rem -= RDS_FRAG_SIZE;
- else {
- ic->i_recv_data_rem = 0;
- ic->i_ibinc = NULL;
-
- if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
- rds_ib_cong_recv(conn, ibinc);
- else {
- rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
- &ibinc->ii_inc, GFP_ATOMIC,
- KM_SOFTIRQ0);
-
- state->ack_next = be64_to_cpu(hdr->h_sequence);
- state->ack_next_valid = 1;
- }
- if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
- rds_stats_inc(s_recv_ack_required);
- state->ack_required = 1;
- }
- rds_inc_put(&ibinc->ii_inc);
- }
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_ib_incoming *ibinc = ic->i_ibinc;
+ struct rds_header *ihdr, *hdr;
+
+ if (data_len < sizeof(struct rds_header)) {
+ printk(KERN_WARNING "RDS: from %pI4 didn't inclue a "
+ "header, disconnecting and "
+ "reconnecting\n",
+ &conn->c_faddr);
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
+ return;
+ }
+ data_len -= sizeof(struct rds_header);
+
+ ihdr = &ic->rds_ibdev->srq->s_recv_hdrs[recv->r_wr.wr_id];
+
+ /* Validate the checksum. */
+ if (!rds_message_verify_checksum(ihdr)) {
+ printk(KERN_WARNING "RDS: from %pI4 has corrupted header - "
+ "forcing a reconnect\n",
+ &conn->c_faddr);
+ rds_stats_inc(s_recv_drop_bad_checksum);
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
+ return;
+ }
+
+ /* Process the ACK sequence which comes with every packet */
+ state->ack_recv = be64_to_cpu(ihdr->h_ack);
+ state->ack_recv_valid = 1;
+
+ if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
+ rds_ib_stats_inc(s_ib_ack_received);
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
+ return;
+ }
+
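+ /*
+  * First fragment of a message: adopt the inc that was posted with
+  * this recv. Continuation fragments must match the header of the
+  * inc we are already assembling.
+  */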
+ if (!ibinc) {
+ ibinc = recv->r_ibinc;
+ rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
+ recv->r_ibinc = NULL;
+ ic->i_ibinc = ibinc;
+ hdr = &ibinc->ii_inc.i_hdr;
+ memcpy(hdr, ihdr, sizeof(*hdr));
+ ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
+ } else {
+ hdr = &ibinc->ii_inc.i_hdr;
+ if (hdr->h_sequence != ihdr->h_sequence ||
+ hdr->h_len != ihdr->h_len ||
+ hdr->h_sport != ihdr->h_sport ||
+ hdr->h_dport != ihdr->h_dport) {
+ printk(KERN_WARNING "RDS: fragment header mismatch; "
+ "forcing reconnect\n");
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
+ return;
+ }
+ }
+
+ list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
+
+ recv->r_frag = NULL;
+
+ if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
+ ic->i_recv_data_rem -= RDS_FRAG_SIZE;
+ else {
+ ic->i_recv_data_rem = 0;
+ ic->i_ibinc = NULL;
+
+ if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
+ rds_ib_cong_recv(conn, ibinc);
+ else {
+ rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
+ &ibinc->ii_inc, GFP_ATOMIC,
+ KM_SOFTIRQ0);
+
+ state->ack_next = be64_to_cpu(hdr->h_sequence);
+ state->ack_next_valid = 1;
+ }
+ if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
+ rds_stats_inc(s_recv_ack_required);
+ state->ack_required = 1;
+ }
+ rds_inc_put(&ibinc->ii_inc);
+ }
}
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
be32_to_cpu(wc->ex.imm_data));
rds_ib_stats_inc(s_ib_rx_cq_event);
-
+
if (conn->c_tos) {
recv = &rds_ibdev->srq->s_recvs[wc->wr_id];
- atomic_dec(&rds_ibdev->srq->s_num_posted);
+ atomic_dec(&rds_ibdev->srq->s_num_posted);
} else
recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
void rds_ib_srq_refill(struct work_struct *work)
{
- struct rds_ib_srq *srq = container_of(work, struct rds_ib_srq, s_refill_w.work);
- struct rds_ib_recv_work *prv=NULL, *cur=NULL, *tmp;
- struct ib_recv_wr *bad_wr;
- int i,refills=0,total_refills=0;
-
- if (!test_bit(0,&srq->s_refill_gate))
- return;
-
- rds_ib_stats_inc(s_ib_srq_refills);
-
- for (i=0; i<srq->s_n_wr; i++) {
- tmp = &srq->s_recvs[i];
- if (tmp->r_posted)
- continue;
-
- if (rds_ib_srq_refill_one(srq, tmp->r_ic, tmp, GFP_NOWAIT)) {
- printk(KERN_ERR "rds_ib_srq_refill_one failed\n");
- break;
- }
- cur = tmp;
-
- if (!prv) {
- prv = cur;
- prv->r_wr.next = NULL;
- } else {
- cur->r_wr.next = &prv->r_wr;
- prv = cur;
- }
- cur->r_posted = 1;
-
- total_refills++;
- if (++refills == RDS_IB_SRQ_POST_BATCH_COUNT) {
- if (ib_post_srq_recv(srq->s_srq, &cur->r_wr, &bad_wr)) {
- struct ib_recv_wr *wr;
- struct rds_ib_recv_work *recv;
-
- for (wr = &cur->r_wr; wr; wr = wr->next) {
- recv = container_of(wr, struct rds_ib_recv_work, r_wr);
- rds_ib_srq_clear_one(srq, recv->r_ic, recv);
- }
- printk(KERN_ERR "ib_post_srq_recv failed\n");
- goto out;
- }
-
- atomic_add(refills,&srq->s_num_posted);
- prv = NULL;
- refills = 0;
- cur = NULL;
- }
- }
- if (cur) {
- if (ib_post_srq_recv(srq->s_srq, &cur->r_wr, &bad_wr)) {
- struct ib_recv_wr *wr;
- struct rds_ib_recv_work *recv;
-
- for (wr = &cur->r_wr; wr; wr = wr->next) {
- recv = container_of(wr, struct rds_ib_recv_work, r_wr);
- rds_ib_srq_clear_one(srq, recv->r_ic, recv);
- }
- printk(KERN_ERR "ib_post_srq_recv failed\n");
- goto out;
- }
- atomic_add(refills,&srq->s_num_posted);
- }
-
- if (!total_refills)
- rds_ib_stats_inc(s_ib_srq_empty_refills);
+ struct rds_ib_srq *srq = container_of(work, struct rds_ib_srq, s_refill_w.work);
+ struct rds_ib_recv_work *prv = NULL, *cur = NULL, *tmp;
+ struct ib_recv_wr *bad_wr;
+ int i, refills = 0, total_refills = 0;
+
+ if (!test_bit(0, &srq->s_refill_gate))
+ return;
+
+ rds_ib_stats_inc(s_ib_srq_refills);
+
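+ /*
+  * Walk the ring, chain every un-posted slot into a work request
+  * list and post it to the SRQ in batches of
+  * RDS_IB_SRQ_POST_BATCH_COUNT.
+  */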
+ for (i = 0; i < srq->s_n_wr; i++) {
+ tmp = &srq->s_recvs[i];
+ if (tmp->r_posted)
+ continue;
+
+ if (rds_ib_srq_refill_one(srq, tmp->r_ic, tmp, GFP_NOWAIT)) {
+ printk(KERN_ERR "rds_ib_srq_refill_one failed\n");
+ break;
+ }
+ cur = tmp;
+
+ if (!prv) {
+ prv = cur;
+ prv->r_wr.next = NULL;
+ } else {
+ cur->r_wr.next = &prv->r_wr;
+ prv = cur;
+ }
+ cur->r_posted = 1;
+
+ total_refills++;
+ if (++refills == RDS_IB_SRQ_POST_BATCH_COUNT) {
+ if (ib_post_srq_recv(srq->s_srq, &cur->r_wr, &bad_wr)) {
+ struct ib_recv_wr *wr;
+ struct rds_ib_recv_work *recv;
+
+ for (wr = &cur->r_wr; wr; wr = wr->next) {
+ recv = container_of(wr, struct rds_ib_recv_work, r_wr);
+ rds_ib_srq_clear_one(srq, recv->r_ic, recv);
+ }
+ printk(KERN_ERR "ib_post_srq_recv failed\n");
+ goto out;
+ }
+ atomic_add(refills, &srq->s_num_posted);
+ prv = NULL;
+ refills = 0;
+ cur = NULL;
+ }
+ }
+ if (cur) {
+ if (ib_post_srq_recv(srq->s_srq, &cur->r_wr, &bad_wr)) {
+ struct ib_recv_wr *wr;
+ struct rds_ib_recv_work *recv;
+
+ for (wr = &cur->r_wr; wr; wr = wr->next) {
+ recv = container_of(wr, struct rds_ib_recv_work, r_wr);
+ rds_ib_srq_clear_one(srq, recv->r_ic, recv);
+ }
+ printk(KERN_ERR "ib_post_srq_recv failed\n");
+ goto out;
+ }
+ atomic_add(refills, &srq->s_num_posted);
+ }
+
+ if (!total_refills)
+ rds_ib_stats_inc(s_ib_srq_empty_refills);
out:
- clear_bit(0,&srq->s_refill_gate);
+ clear_bit(0, &srq->s_refill_gate);
}
int rds_ib_srq_prefill_ring(struct rds_ib_device *rds_ibdev)
{
- struct rds_ib_recv_work *recv;
- struct ib_recv_wr *bad_wr;
- u32 i;
- int ret;
-
- for (i = 0, recv = rds_ibdev->srq->s_recvs;
- i < rds_ibdev->srq->s_n_wr; i++, recv++) {
- recv->r_wr.next = NULL;
- recv->r_wr.wr_id = i;
- recv->r_wr.sg_list = recv->r_sge;
- recv->r_wr.num_sge = RDS_IB_RECV_SGE;
- recv->r_ibinc = NULL;
- recv->r_frag = NULL;
- recv->r_ic = NULL;
-
- if (rds_ib_srq_prefill_one(rds_ibdev, recv, 1))
- return 1;
-
- ret = ib_post_srq_recv(rds_ibdev->srq->s_srq,
- &recv->r_wr, &bad_wr);
- if (ret) {
- printk(KERN_WARNING "RDS: ib_post_srq_recv failed %d\n", ret);
- return 1;
- }
- atomic_inc(&rds_ibdev->srq->s_num_posted);
- recv->r_posted = 1;
- }
- return 0;
+ struct rds_ib_recv_work *recv;
+ struct ib_recv_wr *bad_wr;
+ u32 i;
+ int ret;
+
+ for (i = 0, recv = rds_ibdev->srq->s_recvs;
+ i < rds_ibdev->srq->s_n_wr; i++, recv++) {
+ recv->r_wr.next = NULL;
+ recv->r_wr.wr_id = i;
+ recv->r_wr.sg_list = recv->r_sge;
+ recv->r_wr.num_sge = RDS_IB_RECV_SGE;
+ recv->r_ibinc = NULL;
+ recv->r_frag = NULL;
+ recv->r_ic = NULL;
+
+ if (rds_ib_srq_prefill_one(rds_ibdev, recv, 1))
+ return 1;
+
+ ret = ib_post_srq_recv(rds_ibdev->srq->s_srq,
+ &recv->r_wr, &bad_wr);
+ if (ret) {
+ printk(KERN_WARNING "RDS: ib_post_srq_recv failed %d\n", ret);
+ return 1;
+ }
+ atomic_inc(&rds_ibdev->srq->s_num_posted);
+ recv->r_posted = 1;
+ }
+ return 0;
}
static void rds_ib_srq_clear_ring(struct rds_ib_device *rds_ibdev)
{
- u32 i;
- struct rds_ib_recv_work *recv;
+ u32 i;
+ struct rds_ib_recv_work *recv;
- for (i = 0, recv = rds_ibdev->srq->s_recvs;
- i < rds_ibdev->srq->s_n_wr; i++, recv++)
- rds_ib_srq_clear_one(rds_ibdev->srq, recv->r_ic, recv);
+ for (i = 0, recv = rds_ibdev->srq->s_recvs;
+ i < rds_ibdev->srq->s_n_wr; i++, recv++)
+ rds_ib_srq_clear_one(rds_ibdev->srq, recv->r_ic, recv);
}
void rds_ib_srq_rearm(struct work_struct *work)
{
- struct rds_ib_srq *srq = container_of(work, struct rds_ib_srq, s_rearm_w.work);
- struct ib_srq_attr srq_attr;
-
- srq_attr.srq_limit = rds_ib_srq_low_wr;
- if (ib_modify_srq(srq->s_srq, &srq_attr, IB_SRQ_LIMIT)) {
- printk(KERN_ERR "RDS: ib_modify_srq failed\n");
- return;
- }
+ struct rds_ib_srq *srq = container_of(work, struct rds_ib_srq, s_rearm_w.work);
+ struct ib_srq_attr srq_attr;
+
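+ /*
+  * Re-arm the SRQ limit event: IB_EVENT_SRQ_LIMIT_REACHED fires once
+  * the posted-buffer count falls below rds_ib_srq_low_wr.
+  */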
+ srq_attr.srq_limit = rds_ib_srq_low_wr;
+ if (ib_modify_srq(srq->s_srq, &srq_attr, IB_SRQ_LIMIT)) {
+ printk(KERN_ERR "RDS: ib_modify_srq failed\n");
+ return;
+ }
}
static void rds_ib_srq_event(struct ib_event *event,
- void *ctx)
+ void *ctx)
{
- struct ib_srq_attr srq_attr;
- struct rds_ib_device *rds_ibdev = ctx;
-
- switch (event->event) {
- case IB_EVENT_SRQ_ERR:
- printk(KERN_ERR "RDS: event IB_EVENT_SRQ_ERR unhandled\n",
- event->event);
- break;
- case IB_EVENT_SRQ_LIMIT_REACHED:
- rds_ib_stats_inc(s_ib_srq_lows);
- queue_delayed_work(rds_wq, &rds_ibdev->srq->s_rearm_w,HZ);
-
- if (!test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
- queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w, 0);
- break;
- default:
- break;
- }
+ struct rds_ib_device *rds_ibdev = ctx;
+
+ switch (event->event) {
+ case IB_EVENT_SRQ_ERR:
+ printk(KERN_ERR "RDS: event IB_EVENT_SRQ_ERR unhandled\n",
+ event->event);
+ break;
+ case IB_EVENT_SRQ_LIMIT_REACHED:
+ rds_ib_stats_inc(s_ib_srq_lows);
+ queue_delayed_work(rds_wq, &rds_ibdev->srq->s_rearm_w, HZ);
+
+ if (!test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
+ queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w, 0);
+ break;
+ default:
+ break;
+ }
}
/* Setup SRQ for a device */
int rds_ib_srq_init(struct rds_ib_device *rds_ibdev)
{
- struct ib_srq_init_attr srq_init_attr = {
- rds_ib_srq_event,
- (void *)rds_ibdev,
- .attr = {
- .max_wr = rds_ib_srq_max_wr - 1,
- .max_sge = rds_ibdev->max_sge
- }
- };
+ struct ib_srq_init_attr srq_init_attr = {
+ .event_handler = rds_ib_srq_event,
+ .srq_context = rds_ibdev,
+ .attr = {
+ .max_wr = rds_ib_srq_max_wr - 1,
+ .max_sge = rds_ibdev->max_sge
+ }
+ };
- rds_ibdev->srq->rds_ibdev = rds_ibdev;
+ rds_ibdev->srq->rds_ibdev = rds_ibdev;
- rds_ibdev->srq->s_n_wr = rds_ib_srq_max_wr - 1;
- rds_ibdev->srq->s_srq = ib_create_srq(rds_ibdev->pd,
- &srq_init_attr);
+ rds_ibdev->srq->s_n_wr = rds_ib_srq_max_wr - 1;
+ rds_ibdev->srq->s_srq = ib_create_srq(rds_ibdev->pd,
+ &srq_init_attr);
- if (IS_ERR(rds_ibdev->srq->s_srq)) {
- printk(KERN_WARNING "RDS: ib_create_srq failed %d\n",
- ERR_PTR(rds_ibdev->srq->s_srq));
- return 1;
- }
+ if (IS_ERR(rds_ibdev->srq->s_srq)) {
+ printk(KERN_WARNING "RDS: ib_create_srq failed %d\n",
+ ERR_PTR(rds_ibdev->srq->s_srq));
+ return 1;
+ }
- rds_ibdev->srq->s_recv_hdrs = ib_dma_alloc_coherent(rds_ibdev->dev,
- rds_ibdev->srq->s_n_wr *
- sizeof(struct rds_header),
- &rds_ibdev->srq->s_recv_hdrs_dma, GFP_KERNEL);
- if (!rds_ibdev->srq->s_recv_hdrs) {
- printk(KERN_WARNING "ib_dma_alloc_coherent failed\n");
- return 1;
- }
+ rds_ibdev->srq->s_recv_hdrs = ib_dma_alloc_coherent(rds_ibdev->dev,
+ rds_ibdev->srq->s_n_wr *
+ sizeof(struct rds_header),
+ &rds_ibdev->srq->s_recv_hdrs_dma, GFP_KERNEL);
+ if (!rds_ibdev->srq->s_recv_hdrs) {
+ printk(KERN_WARNING "ib_dma_alloc_coherent failed\n");
+ return 1;
+ }
- rds_ibdev->srq->s_recvs = vmalloc(rds_ibdev->srq->s_n_wr *
- sizeof(struct rds_ib_recv_work));
+ rds_ibdev->srq->s_recvs = vmalloc(rds_ibdev->srq->s_n_wr *
+ sizeof(struct rds_ib_recv_work));
- if (!rds_ibdev->srq->s_recvs) {
- printk(KERN_WARNING "RDS: vmalloc failed\n");
- return 1;
- }
+ if (!rds_ibdev->srq->s_recvs) {
+ printk(KERN_WARNING "RDS: vmalloc failed\n");
+ return 1;
+ }
- memset(rds_ibdev->srq->s_recvs, 0, rds_ibdev->srq->s_n_wr *
- sizeof(struct rds_ib_recv_work));
+ memset(rds_ibdev->srq->s_recvs, 0, rds_ibdev->srq->s_n_wr *
+ sizeof(struct rds_ib_recv_work));
- atomic_set(&rds_ibdev->srq->s_num_posted,0);
- clear_bit(0,&rds_ibdev->srq->s_refill_gate);
+ atomic_set(&rds_ibdev->srq->s_num_posted, 0);
+ clear_bit(0, &rds_ibdev->srq->s_refill_gate);
- if (rds_ib_srq_prefill_ring(rds_ibdev))
- return 1;
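+ /* Fill the whole ring up front; the rearm and refill workers keep it topped up once traffic starts. */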
+ if (rds_ib_srq_prefill_ring(rds_ibdev))
+ return 1;
- INIT_DELAYED_WORK(&rds_ibdev->srq->s_refill_w, rds_ib_srq_refill);
+ INIT_DELAYED_WORK(&rds_ibdev->srq->s_refill_w, rds_ib_srq_refill);
- INIT_DELAYED_WORK(&rds_ibdev->srq->s_rearm_w, rds_ib_srq_rearm);
+ INIT_DELAYED_WORK(&rds_ibdev->srq->s_rearm_w, rds_ib_srq_rearm);
- queue_delayed_work(rds_wq, &rds_ibdev->srq->s_rearm_w, 0);
+ queue_delayed_work(rds_wq, &rds_ibdev->srq->s_rearm_w, 0);
- return 0;
+ return 0;
}
int rds_ib_srqs_init(void)
{
- struct rds_ib_device *rds_ibdev;
- int ret;
+ struct rds_ib_device *rds_ibdev;
+ int ret;
- list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
- ret = rds_ib_srq_init(rds_ibdev);
- if (ret) return ret;
- }
+ list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
+ ret = rds_ib_srq_init(rds_ibdev);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return 0;
}
void rds_ib_srq_exit(struct rds_ib_device *rds_ibdev)
{
- int ret;
-
- ret = ib_destroy_srq(rds_ibdev->srq->s_srq);
- if (ret) {
- printk(KERN_WARNING "RDS: ib_destroy_srq failed %d\n", ret);
- }
- rds_ibdev->srq->s_srq = NULL;
-
- if (rds_ibdev->srq->s_recv_hdrs)
- ib_dma_free_coherent(rds_ibdev->dev,
- rds_ibdev->srq->s_n_wr *
- sizeof(struct rds_header),
- rds_ibdev->srq->s_recv_hdrs,
- rds_ibdev->srq->s_recv_hdrs_dma);
-
- rds_ib_srq_clear_ring(rds_ibdev);
- vfree(rds_ibdev->srq->s_recvs);
- rds_ibdev->srq->s_recvs = NULL;
+ int ret;
+
+ ret = ib_destroy_srq(rds_ibdev->srq->s_srq);
+ if (ret)
+ printk(KERN_WARNING "RDS: ib_destroy_srq failed %d\n", ret);
+ rds_ibdev->srq->s_srq = NULL;
+
+ if (rds_ibdev->srq->s_recv_hdrs)
+ ib_dma_free_coherent(rds_ibdev->dev,
+ rds_ibdev->srq->s_n_wr *
+ sizeof(struct rds_header),
+ rds_ibdev->srq->s_recv_hdrs,
+ rds_ibdev->srq->s_recv_hdrs_dma);
+
+ rds_ib_srq_clear_ring(rds_ibdev);
+ vfree(rds_ibdev->srq->s_recvs);
+ rds_ibdev->srq->s_recvs = NULL;
}
void rds_ib_srqs_exit(void)
{
- struct rds_ib_device *rds_ibdev;
+ struct rds_ib_device *rds_ibdev;
- list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
- rds_ib_srq_exit(rds_ibdev);
- }
+ list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
+ rds_ib_srq_exit(rds_ibdev);
+ }
}