 	cq = ssk->cq;
 	if (unlikely(!cq))
 		goto out;
+
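+	/*
+	 * Connection not established yet: don't poll, just notify the
+	 * RDMA CM that the QP saw activity.
+	 */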
+	if (unlikely(!ssk->poll_cq)) {
+		struct rdma_cm_id *id = ssk->id;
+		if (id && id->qp)
+			rdma_notify(id, RDMA_CM_EVENT_ESTABLISHED);
+		goto out;
+	}
+
 	sdp_poll_cq(ssk, cq);
 	release_sock(sk);
 	sk_mem_reclaim(sk);
 		goto err_cq;
 	}
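+	/* arm the CQ so the next completion raises an event */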
+	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
 	qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
 	rc = rdma_create_qp(id, pd, &qp_init_attr);
 		  sdp_sk(sk)->xmit_size_goal,
 		  sdp_sk(sk)->min_bufs);
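+	/* connection is up: enable polling, arm the CQ for events, and drain any completions already queued */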
+	sdp_sk(sk)->poll_cq = 1;
 	ib_req_notify_cq(sdp_sk(sk)->cq, IB_CQ_NEXT_COMP);
+	sdp_poll_cq(sdp_sk(sk), sdp_sk(sk)->cq);
 	sk->sk_state_change(sk);
 	sk_wake_async(sk, 0, POLL_OUT);
 	lock_sock(newsk);
 	if (newssk->cq) {
 		sdp_dbg(newsk, "%s: ib_req_notify_cq\n", __func__);
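+		/* passive side: enable polling on the accepted socket as well */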
+		newssk->poll_cq = 1;
 		ib_req_notify_cq(newssk->cq, IB_CQ_NEXT_COMP);
 		sdp_poll_cq(newssk, newssk->cq);
 	}