}
ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+ atomic_set(&ic->i_cq_quiesce, 0);
/*
* Init rings and fill recv. this needs to wait until protocol negotiation
memset(&ack_state, 0, sizeof(ack_state));
rds_ib_stats_inc(s_ib_tasklet_call);
- /* if send cq has been destroyed, ignore incoming cq event */
- if (!ic->i_scq)
+ /* if the cq has already been reaped, ignore incoming cq event */
+ if (atomic_read(&ic->i_cq_quiesce))
return;
poll_cq(ic, ic->i_scq, ic->i_send_wc, &ack_state, 0);
rds_ib_stats_inc(s_ib_tasklet_call);
+ /* if the cq has already been reaped, ignore incoming cq event */
+ if (atomic_read(&ic->i_cq_quiesce))
+ return;
+
memset(&ack_state, 0, sizeof(ack_state));
ic->i_rx_poll_cq = 0;
tasklet_kill(&ic->i_stasklet);
tasklet_kill(&ic->i_rtasklet);
+ atomic_set(&ic->i_cq_quiesce, 1);
+
/* first destroy the ib state that generates callbacks */
if (ic->i_cm_id->qp)
rdma_destroy_qp(ic->i_cm_id);