struct ib_qp_init_attr *init_attr,
struct sif_qp_init_attr sif_attr);
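+/* Get SQ associated with the QP (XRC target QPs have no send queue) */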
+struct sif_sq *get_sq(struct sif_dev *sdev, struct sif_qp *qp)
+{
+ return is_xtgt_qp(qp) ? NULL : get_sif_sq(sdev, qp->qp_idx);
+}
+
+/* Get RQ associated with the QP (the PQP and XRC QPs have no receive queue) */
+struct sif_rq *get_rq(struct sif_dev *sdev, struct sif_qp *qp)
+{
+ return (is_xrc_qp(qp) || qp->type == PSIF_QP_TRANSPORT_MANSP1) ?
+ NULL : get_sif_rq(sdev, qp->rq_idx);
+}
+
static int poll_wait_for_qp_writeback(struct sif_dev *sdev, struct sif_qp *qp)
{
unsigned long timeout = sdev->min_resp_ticks;
struct sif_qp_init_attr *sif_attr)
{
struct sif_qp *qp, *rqp = NULL;
- struct sif_sq *sq;
- struct psif_qp qpi;
+ struct sif_sq *sq = NULL;
struct sif_rq *rq = NULL;
+ struct psif_qp qpi;
struct sif_pd *pd = sif_attr->pd;
int ret = 0;
sif_log(sdev, SIF_QP, "qpn %d, qp 0x%p [no send cq] (type %s) port %d, pd %d",
index, qp, string_enum_psif_qp_trans(qp->type), qp->port, pd->idx);
- /* The PQP does not have any receive queue, neither does the XRC qp
- * where RQs are selected per work request via wr.xrc_hdr.xrqd_id
- */
- if (is_regular_qp(qp)) {
+ /* The PQP and XRC QPs do not have receive queues; for XRC,
+ * RQs are selected per work request via wr.xrc_hdr.xrqd_id
+ */
+ if (qp->type != PSIF_QP_TRANSPORT_MANSP1 && qp->type != PSIF_QP_TRANSPORT_XRC) {
if (init_attr->srq) {
rq = to_srq(init_attr->srq);
if (atomic_add_unless(&rq->refcnt, 1, 0)) {
rq->cq_idx = recv_cq->index;
}
+ if (init_attr->qp_type != IB_QPT_XRC_TGT) {
+ /* The SQ always gets the same index as the QP */
+ ret = sif_alloc_sq(sdev, pd, qp, &init_attr->cap,
+ sif_attr->user_mode, sif_attr->sq_hdl_sz);
+ if (ret < 0) {
+ rqp = ERR_PTR(ret);
+ goto err_sq_fail;
+ }
- /* sq always gets same index as QP.. */
- ret = sif_alloc_sq(sdev, pd, qp, &init_attr->cap,
- sif_attr->user_mode, sif_attr->sq_hdl_sz);
- if (ret < 0) {
- rqp = ERR_PTR(ret);
- goto err_sq_fail;
- }
-
- /* Store send completion queue index default since
- * for psif send cq number is a parameter in the work request
- */
- sq = get_sif_sq(sdev, qp->qp_idx);
- sq->cq_idx = send_cq ? send_cq->index : (u32)-1; /* XRC recv only */
- sq->complete_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR ? 1 : 0;
+ /* Store the default send completion queue index; for PSIF,
+ * the send cq number is a parameter in each work request
+ */
+ sq = get_sif_sq(sdev, qp->qp_idx);
+ sq->cq_idx = send_cq ? send_cq->index : (u32)-1; /* XRC recv only */
+ sq->complete_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR ? 1 : 0;
- /* Adjust requested values based on what we got: */
- init_attr->cap.max_send_wr = sq->entries;
+ /* Adjust requested values based on what we got: */
+ init_attr->cap.max_send_wr = sq->entries;
+ }
/* Initialization of qp state via local copy */
memset(&qpi, 0, sizeof(struct psif_qp));
- if (multipacket_qp(qp->type)) {
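+ /* sq is NULL for XRC target QPs, so only set SQ geometry when present */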
+ if (is_reliable_qp(qp->type) && init_attr->qp_type != IB_QPT_XRC_TGT) {
qpi.state.sq_clog2_extent = order_base_2(sq->extent);
qpi.state.sq_clog2_size = order_base_2(sq->entries);
}
qp->port = init_attr->port_num;
sdev->pma_qp_idxs[qp->port - 1] = qp->qp_idx;
- /* Make dfs and query_qp happy: */
+ /* Init ibqp side of things */
qp->ibqp.device = &sdev->ib_dev;
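+ /* Not a shared QP: real_qp points back to this QP */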
+ qp->ibqp.real_qp = &qp->ibqp;
+ qp->ibqp.uobject = NULL;
+ qp->ibqp.qp_type = IB_QPT_GSI;
+ atomic_set(&qp->ibqp.usecnt, 0);
+ qp->ibqp.event_handler = init_attr->event_handler;
+ qp->ibqp.qp_context = init_attr->qp_context;
+ qp->ibqp.recv_cq = init_attr->recv_cq;
+ qp->ibqp.srq = init_attr->srq;
qp->ibqp.pd = &sdev->pd->ibpd;
+ qp->ibqp.send_cq = init_attr->send_cq;
+ qp->ibqp.xrcd = NULL;
/* Set back IB_QPT_GSI */
init_attr->qp_type = IB_QPT_GSI;
if (udata) {
struct sif_create_qp_resp_ext resp;
- struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
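+ /* sq/rq are NULL for QP types that do not have them */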
+ struct sif_sq *sq = get_sq(sdev, qp);
+ struct sif_rq *rq = get_rq(sdev, qp);
int rv;
memset(&resp, 0, sizeof(resp));
resp.qp_idx = qp->qp_idx;
- resp.sq_extent = sq->extent;
- resp.sq_sgl_offset = sq->sgl_offset;
- resp.sq_mr_idx = sq->sg_mr ? sq->sg_mr->index : 0;
- resp.sq_dma_handle = sif_mem_dma(sq->mem, 0);
- if (init_attr->qp_type != IB_QPT_XRC_INI && init_attr->qp_type != IB_QPT_XRC_TGT) {
- /* XRC qps do not have any rq */
- struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
+ if (sq) {
+ resp.sq_extent = sq->extent;
+ resp.sq_sgl_offset = sq->sgl_offset;
+ resp.sq_mr_idx = sq->sg_mr ? sq->sg_mr->index : 0;
+ resp.sq_dma_handle = sif_mem_dma(sq->mem, 0);
+ }
+
+ if (rq) {
resp.rq_idx = qp->rq_idx;
resp.rq_extent = rq->extent;
}
int ret;
enum rdma_link_layer ll = IB_LINK_LAYER_INFINIBAND;
- /* PSIF treats XRC just as any other RC QP */
- if (type == IB_QPT_XRC_INI || type == IB_QPT_XRC_TGT)
- type = IB_QPT_RC;
ret = ((qp->type == PSIF_QP_TRANSPORT_MANSP1 || is_epsa_tunneling_qp(type)) ? 1 :
ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll));
if (!ret)
int ret = 0;
if (need_wa_3713 || need_wa_4074) {
- if (qp->type != PSIF_QP_TRANSPORT_MANSP1)
+ if (qp->type != PSIF_QP_TRANSPORT_MANSP1 && !is_xtgt_qp(qp))
ret = pre_process_wa4074(sdev, qp);
if (ret) {
qp->flags &= ~SIF_QPF_HW_OWNED;
- if (qp->type != PSIF_QP_TRANSPORT_MANSP1)
+ if (qp->type != PSIF_QP_TRANSPORT_MANSP1 && !is_xtgt_qp(qp))
ret = post_process_wa4074(sdev, qp);
if (ret)
{
int ret = 0;
struct ib_qp *ibqp = &qp->ibqp;
+ struct sif_rq *rq = get_rq(sdev, qp);
+ struct sif_sq *sq = get_sq(sdev, qp);
enum ib_qp_state cur_state, new_state;
enum sif_mqp_type mqp_type = SIF_MQP_IGN;
sif_log(sdev, SIF_QP, "Enter: qpn %d qp_idx %d mask 0x%x",
ibqp->qp_num, qp->qp_idx, qp_attr_mask);
- /* WA #622, RQ flush from error completion in userspace */
- if (udata && is_regular_qp(qp)) {
+ /* WA for Bug 622, RQ flush from error completion in userspace */
+ if (udata) {
struct sif_modify_qp_ext cmd;
- struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
ret = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (ret) {
switch (cmd.flush) {
case FLUSH_RQ:
- ret = sif_flush_rq(sdev, rq, qp, rq->entries);
- if (ret)
- sif_log(sdev, SIF_INFO, "failed to flush RQ %d",
- rq->index);
+ if (unlikely(!rq)) {
+ ret = -EINVAL;
+ sif_log(sdev, SIF_INFO,
+ "flush requested for qp(type %s) with no rq defined",
+ string_enum_psif_qp_trans(qp->type));
+ } else {
+ ret = sif_flush_rq(sdev, rq, qp, rq->entries);
+ if (ret)
+ sif_log(sdev, SIF_INFO, "failed to flush RQ %d", rq->index);
+ }
return ret;
case FLUSH_SQ:
- ret = post_process_wa4074(sdev, qp);
- if (ret)
- sif_log(sdev, SIF_INFO, "failed to flush SQ %d", qp->qp_idx);
+ if (unlikely(!sq)) {
+ ret = -EINVAL;
+ sif_log(sdev, SIF_INFO,
+ "flush requested for qp(type %s) with no sq defined",
+ string_enum_psif_qp_trans(qp->type));
+ } else {
+ ret = post_process_wa4074(sdev, qp);
+ if (ret)
+ sif_log(sdev, SIF_INFO, "failed to flush SQ %d", qp->qp_idx);
+ }
return ret;
default:
break;
new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
+ sif_log(sdev, SIF_QP, "qpn %d qp_idx %d requested state 0x%x cur state 0x%x",
+ ibqp->qp_num, qp->qp_idx, new_state, cur_state);
+
if (!fail_on_same_state && cur_state == qp_attr->qp_state) {
/* Silently ignore.. (used at destroy time) */
goto sif_mqp_ret;
*/
switch (new_state) {
case IB_QPS_ERR:
- if (is_regular_qp(qp)) {
- struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
-
+ if (rq) {
/* WA #3850: if SRQ, generate LAST_WQE event */
if (rq->is_srq && qp->ibqp.event_handler) {
struct ib_event ibe = {
};
qp->ibqp.event_handler(&ibe, qp->ibqp.qp_context);
- } else if (rq && !rq->is_srq) {
+ } else if (!rq->is_srq) {
/* WA #622: if regular RQ, flush */
ret = sif_flush_rq(sdev, rq, qp, rq->entries);
if (ret) {
qp->qp_idx, qp_attr->dest_qp_num);
}
+ /* PSIF requires additional attributes to transition an XRC QP to RTS */
+ if (is_xrc_qp(qp) && qp_attr->qp_state == IB_QPS_RTS) {
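+ /* Fixed defaults: error/rnr retry count 7, 16 outstanding requests */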
+ ctrl_attr->error_retry_count = 1;
+ mct->data.error_retry_count = 7;
+ ctrl_attr->rnr_retry_count = 1;
+ mct->data.rnr_retry_count = 7;
+ ctrl_attr->max_outstanding = 1;
+ mct->data.max_outstanding = 16;
+ }
+
ok_modify_qp_sw:
/*
if (ret)
goto err_modify_qp;
- if (reliable_qp(qp->type)
- && (qp_attr_mask & IB_QP_STATE)) {
- if ((qp->last_set_state == IB_QPS_INIT)
- && (qp_attr->qp_state == IB_QPS_RTR)) {
+ if (!is_xtgt_qp(qp) && is_reliable_qp(qp->type) && (qp_attr_mask & IB_QP_STATE)) {
+ if ((qp->last_set_state == IB_QPS_INIT) && (qp_attr->qp_state == IB_QPS_RTR)) {
/* Map the new send queue into the global sq_cmpl PSIF
* only address map, see #944
*/
qp->sq_cmpl_map_valid = true;
- } else if ((qp->sq_cmpl_map_valid)
- && (qp_attr->qp_state == IB_QPS_RESET)) {
+ } else if ((qp->sq_cmpl_map_valid) && (qp_attr->qp_state == IB_QPS_RESET)) {
/* Unmap the send queue from the global sq_cmpl PSIF */
ret = sif_sq_cmpl_unmap_sq(sdev, get_sif_sq(sdev, qp->qp_idx));
if (ret)
struct sif_dev *sdev = to_sdev(ibqp->device);
struct sif_qp *qp = to_sqp(ibqp);
volatile struct psif_qp *qps = &qp->d;
- struct sif_rq *rq = NULL;
- struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+ struct sif_rq *rq = get_rq(sdev, qp);
+ struct sif_sq *sq = get_sq(sdev, qp);
int ret = 0;
- if (qp->type != PSIF_QP_TRANSPORT_XRC)
- rq = get_sif_rq(sdev, qp->rq_idx);
-
/* Mellanox almost completely ignores the mask on both
* input and output and reports all attributes regardless,
* as opposed to what man ibv_query_qp indicates.
qp_init_attr->cap.max_recv_wr = rq->entries_user;
qp_init_attr->cap.max_recv_sge = rq->sg_entries;
}
- qp_init_attr->cap.max_send_wr = sq->entries;
- qp_init_attr->cap.max_send_sge = sq->sg_entries;
+
+ if (sq) {
+ qp_init_attr->cap.max_send_wr = sq->entries;
+ qp_init_attr->cap.max_send_sge = sq->sg_entries;
+ }
qp_init_attr->cap.max_inline_data = qp->max_inline_data;
/* TBD: What to do with this:
int ret = 0;
struct sif_qp *qp = to_sqp(ibqp);
struct sif_dev *sdev = to_sdev(ibqp->device);
- struct sif_rq *rq = NULL;
- struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+ struct sif_rq *rq = get_rq(sdev, qp);
+ struct sif_sq *sq = get_sq(sdev, qp);
struct psif_query_qp lqqp;
/* Take QP lock to avoid any race condition on updates to last_set_state: */
if (ret)
return ret;
- if (qp->type != PSIF_QP_TRANSPORT_XRC)
- rq = get_sif_rq(sdev, qp->rq_idx);
-
/* Mellanox almost completely ignores the mask on both
* input and output and reports all attributes regardless,
* as opposed to what man ibv_query_qp indicates.
qp_init_attr->cap.max_recv_wr = rq->entries_user;
qp_init_attr->cap.max_recv_sge = rq->sg_entries;
}
- qp_init_attr->cap.max_send_wr = sq->entries;
- qp_init_attr->cap.max_send_sge = sq->sg_entries;
+
+ if (sq) {
+ qp_init_attr->cap.max_send_wr = sq->entries;
+ qp_init_attr->cap.max_send_sge = sq->sg_entries;
+ }
qp_init_attr->cap.max_inline_data = qp->max_inline_data;
/* TBD: What to do with these..
struct ib_qp_attr mod_attr = {
.qp_state = IB_QPS_RESET
};
- struct sif_rq *rq = NULL;
+ struct sif_rq *rq = get_rq(sdev, qp);
bool reuse_ok = true;
/* See bug #3496 */
sif_log(sdev, SIF_QP, "## Enter qp_idx %d", index);
- if (is_regular_qp(qp))
- rq = get_sif_rq(sdev, qp->rq_idx);
-
/* make sure event handling is performed before reset the qp.*/
if (atomic_dec_and_test(&qp->refcnt))
complete(&qp->can_destroy);
if (!(qp->flags & SIF_QPF_USER_MODE)) {
int nfixup;
- struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+ struct sif_sq *sq = get_sq(sdev, qp);
u32 cq_idx = get_psif_qp_core__rcv_cq_indx(&qp->d.state);
struct sif_cq *send_cq = (sq && sq->cq_idx >= 0) ? get_sif_cq(sdev, sq->cq_idx) : NULL;
struct sif_cq *recv_cq = rq ? get_sif_cq(sdev, cq_idx) : NULL;
sif_free_sq(sdev, qp);
if (rq) {
- ret = free_rq(sdev, qp->rq_idx);
- if (ret && (ret != -EBUSY || !rq->is_srq))
- return ret;
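+ /* For an SRQ, just drop our reference; the SRQ is destroyed separately */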
+ if (rq->is_srq)
+ atomic_dec(&rq->refcnt);
+ else {
+ ret = free_rq(sdev, qp->rq_idx);
+ if (ret && ret != -EBUSY)
+ return ret;
+ }
}
if (index > 3 && reuse_ok)
static int reset_qp(struct sif_dev *sdev, struct sif_qp *qp)
{
volatile struct psif_qp *qps = &qp->d;
- struct sif_rq *rq = NULL;
- struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+ struct sif_rq *rq = get_rq(sdev, qp);
+ struct sif_sq *sq = get_sq(sdev, qp);
bool need_wa_3713 = false;
/* Bring down order needed by rev2 according to bug #3480 */
if (ret)
goto failed;
- if (is_regular_qp(qp))
- rq = get_sif_rq(sdev, qp->rq_idx);
-
/* WA 3713 special handling */
need_wa_3713 = (PSIF_REVISION(sdev) <= 3)
&& IS_PSIF(sdev) /* Next check if there is a retry outstanding */
/* if the send queue scheduler is running, wait for
* it to terminate:
*/
- ret = sif_flush_sqs(sdev, sq);
- if (ret)
- goto failed;
+ ret = 0;
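+ /* XRC target QPs have no SQ, hence nothing to flush */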
+ if (qp->ibqp.qp_type != IB_QPT_XRC_TGT) {
+ ret = sif_flush_sqs(sdev, sq);
+ if (ret)
+ goto failed;
+ }
sif_logs(SIF_DUMP,
write_struct_psif_qp(NULL, 1, (struct psif_qp *)&qp->d));
}
/* Reset the SQ pointers */
- if (!qp->ibqp.xrcd) {
+ if (!is_xtgt_qp(qp)) {
struct sif_sq_sw *sq_sw = get_sif_sq_sw(sdev, qp->qp_idx);
memset(sq_sw, 0, sizeof(*sq_sw));