From: Vinay Shaw
Date: Thu, 12 May 2016 05:52:01 +0000 (+0200)
Subject: sif: XRC: XRC support and PSIF 2.1 limitation #3521
X-Git-Tag: v4.1.12-92~129^2~37
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=4209834a949d68513d499e8bb4f95ae2c0bc299c;p=users%2Fjedix%2Flinux-maple.git

sif: XRC: XRC support and PSIF 2.1 limitation #3521

This commit addresses the issue of XRC support (Orabug: 23044600).

Changes include:
- Taught the XRCTGT QP not to allocate an SQ.
- Introduced get_sq()/get_rq() helpers to check the XRC cases
  (an XRC INI/TGT QP has no RQ; an XRC TGT QP has no SQ and no RQ).
- Overloaded "ib_qp_attr" attributes when modifying an XRC QP to the
  RTS state, as required by PSIF.
- Rearranged/moved all QP helper functions into the sif_qp.c/.h files.

Note about user space support for XRC:
Since an XRCSRQ can be targeted by multiple XRCTGT QPs in the same XRC
domain, the QP# carried in a completion is not enough to identify the
receive queue; MLX hardware therefore overloads "src_qp" with the
XRCSRQ# in completions. For now, we limit the XRC association (kernel
contexts are not affected) to one user-context <--> one XRCTGTQP/XSRQ.
A sketch of the user-space object model this refers to follows the
diff.

Signed-off-by: Vinay Shaw
Reviewed-by: Knut Omang
---

diff --git a/drivers/infiniband/hw/sif/sif_cq.c b/drivers/infiniband/hw/sif/sif_cq.c
index f3409f54c3421..c4d01f76172a0 100644
--- a/drivers/infiniband/hw/sif/sif_cq.c
+++ b/drivers/infiniband/hw/sif/sif_cq.c
@@ -384,14 +384,19 @@ static int handle_send_wc(struct sif_dev *sdev, struct sif_cq *cq, struct ib_wc
 		struct psif_cq_entry *cqe, bool qp_is_destroyed)
 {
 	/* send queue descriptor aligned with qp */
-	int sq_idx = cqe->qp;
+	struct sif_sq *sq = get_sif_sq(sdev, cqe->qp);
+	struct sif_sq_sw *sq_sw = sq ? get_sif_sq_sw(sdev, cqe->qp) : NULL;
 	int ret;
-	struct sif_sq *sq = get_sif_sq(sdev, sq_idx);
-	struct sif_sq_sw *sq_sw = get_sif_sq_sw(sdev, sq_idx);
 
 	/* This is a full 32 bit seq.num */
 	u32 sq_seq_num = cqe->wc_id.sq_id.sq_seq_num;
 
+	if (unlikely(!sq)) {
+		sif_log(sdev, SIF_INFO,
+			"sq doesn't exist for qp %d", cqe->qp);
+		return -EFAULT;
+	}
+
 	if (qp_is_destroyed) {
 		wc->wr_id = cqe->wc_id.rq_id;
 
@@ -457,7 +462,7 @@ static int handle_recv_wc(struct sif_dev *sdev, struct sif_cq *cq, struct ib_wc
 		    (wc->status != IB_WC_SUCCESS)) {
 			struct sif_qp *qp = to_sqp(wc->qp);
 
-			if (is_regular_qp(qp) && !rq->is_srq
+			if (rq && !rq->is_srq
 			    && IB_QPS_ERR == get_qp_state(qp)) {
 				if (sif_flush_rq(sdev, rq, qp, rq_len))
 					sif_log(sdev, SIF_INFO,
@@ -567,13 +572,13 @@ static int handle_wc(struct sif_dev *sdev, struct sif_cq *cq,
 		 */
 
		/* WA #3850: generate LAST_WQE event on SRQ*/
-		struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
+		struct sif_rq *rq = get_rq(sdev, qp);
 		int log_level = (wc->status == IB_WC_WR_FLUSH_ERR) ?
 			SIF_WCE_V : SIF_WCE;
 
-		if (!qp_is_destroyed && is_regular_qp(qp) && rq->is_srq) {
+		if (!qp_is_destroyed && rq && rq->is_srq) {
 			if (fatal_err(qp->ibqp.qp_type, wc)) {
 				struct ib_event ibe = {
 					.device = &sdev->ib_dev,
diff --git a/drivers/infiniband/hw/sif/sif_eq.c b/drivers/infiniband/hw/sif/sif_eq.c
index e52890dd27821..df1d4e0e0ceff 100644
--- a/drivers/infiniband/hw/sif/sif_eq.c
+++ b/drivers/infiniband/hw/sif/sif_eq.c
@@ -628,9 +628,9 @@ static void handle_event_work(struct work_struct *work)
 	case IB_EVENT_QP_LAST_WQE_REACHED:
 	{
 		struct ib_qp *ibqp = ew->ibe.element.qp;
 		struct sif_qp *qp = to_sqp(ibqp);
+		struct sif_rq *rq = get_rq(sdev, qp);
 
-		if (is_regular_qp(qp)) {
-			struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
+		if (rq) {
 			struct sif_rq_sw *rq_sw = get_sif_rq_sw(sdev, rq->index);
 
 			/* WA #3850:if SRQ, generate LAST_WQE event */
@@ -659,6 +659,10 @@ static void handle_event_work(struct work_struct *work)
 		struct ib_qp *ibqp = ew->ibe.element.qp;
 		struct sif_qp *qp = to_sqp(ibqp);
 
+		/* Avoid MAD layer reporting of fatal error */
+		if ((ibqp->qp_type == IB_QPT_GSI) && (ew->ibe.event == IB_EVENT_COMM_EST))
+			break;
+
 		if (ibqp->event_handler)
 			ibqp->event_handler(&ew->ibe, ibqp->qp_context);
 
diff --git a/drivers/infiniband/hw/sif/sif_qp.c b/drivers/infiniband/hw/sif/sif_qp.c
index 8b5a59a693449..c7fdfc99ae9b3 100644
--- a/drivers/infiniband/hw/sif/sif_qp.c
+++ b/drivers/infiniband/hw/sif/sif_qp.c
@@ -77,6 +77,18 @@ static int sif_create_pma_qp(struct ib_pd *ibpd,
 	struct ib_qp_init_attr *init_attr,
 	struct sif_qp_init_attr sif_attr);
 
+struct sif_sq *get_sq(struct sif_dev *sdev, struct sif_qp *qp)
+{
+	return is_xtgt_qp(qp) ? NULL : get_sif_sq(sdev, qp->qp_idx);
+}
+
+/* Get RQ associated with QP */
+struct sif_rq *get_rq(struct sif_dev *sdev, struct sif_qp *qp)
+{
+	return is_xrc_qp(qp) || qp->type == PSIF_QP_TRANSPORT_MANSP1 ?
+		NULL : get_sif_rq(sdev, qp->rq_idx);
+}
+
 static int poll_wait_for_qp_writeback(struct sif_dev *sdev, struct sif_qp *qp)
 {
 	unsigned long timeout = sdev->min_resp_ticks;
@@ -143,9 +155,9 @@ struct sif_qp *create_qp(struct sif_dev *sdev,
 	struct sif_qp_init_attr *sif_attr)
 {
 	struct sif_qp *qp, *rqp = NULL;
-	struct sif_sq *sq;
-	struct psif_qp qpi;
+	struct sif_sq *sq = NULL;
 	struct sif_rq *rq = NULL;
+	struct psif_qp qpi;
 	struct sif_pd *pd = sif_attr->pd;
 
 	int ret = 0;
@@ -280,10 +292,8 @@ struct sif_qp *create_qp(struct sif_dev *sdev,
 		sif_log(sdev, SIF_QP, "qpn %d, qp 0x%p [no send cq] (type %s) port %d, pd %d",
 			index, qp, string_enum_psif_qp_trans(qp->type), qp->port, pd->idx);
 
-	/* The PQP does not have any receive queue, neither does the XRC qp
-	 * where RQs are selected per work request via wr.xrc_hdr.xrqd_id
-	 */
-	if (is_regular_qp(qp)) {
+	/* The PQP and XRC QPs do not have receive queues */
+	if (qp->type != PSIF_QP_TRANSPORT_MANSP1 && qp->type != PSIF_QP_TRANSPORT_XRC) {
 		if (init_attr->srq) {
 			rq = to_srq(init_attr->srq);
 			if (atomic_add_unless(&rq->refcnt, 1, 0)) {
@@ -327,29 +337,30 @@ struct sif_qp *create_qp(struct sif_dev *sdev,
 			rq->cq_idx = recv_cq->index;
 	}
 
+	if (init_attr->qp_type != IB_QPT_XRC_TGT) {
+		/* sq always gets same index as QP.. */
+		ret = sif_alloc_sq(sdev, pd, qp, &init_attr->cap,
+				sif_attr->user_mode, sif_attr->sq_hdl_sz);
+		if (ret < 0) {
+			rqp = ERR_PTR(ret);
+			goto err_sq_fail;
+		}
 
-	/* sq always gets same index as QP.. */
-	ret = sif_alloc_sq(sdev, pd, qp, &init_attr->cap,
-			sif_attr->user_mode, sif_attr->sq_hdl_sz);
-	if (ret < 0) {
-		rqp = ERR_PTR(ret);
-		goto err_sq_fail;
-	}
-
-	/* Store send completion queue index default since
-	 * for psif send cq number is a parameter in the work request
-	 */
-	sq = get_sif_sq(sdev, qp->qp_idx);
-	sq->cq_idx = send_cq ? send_cq->index : (u32)-1; /* XRC recv only */
-	sq->complete_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR ? 1 : 0;
+		/* Store send completion queue index default since
+		 * for psif send cq number is a parameter in the work request
+		 */
+		sq = get_sif_sq(sdev, qp->qp_idx);
+		sq->cq_idx = send_cq ? send_cq->index : (u32)-1; /* XRC recv only */
+		sq->complete_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR ? 1 : 0;
 
-	/* Adjust requested values based on what we got: */
-	init_attr->cap.max_send_wr = sq->entries;
+		/* Adjust requested values based on what we got: */
+		init_attr->cap.max_send_wr = sq->entries;
+	}
 
 	/* Initialization of qp state via local copy */
 	memset(&qpi, 0, sizeof(struct psif_qp));
-	if (multipacket_qp(qp->type)) {
+	if (is_reliable_qp(qp->type) && init_attr->qp_type != IB_QPT_XRC_TGT) {
 		qpi.state.sq_clog2_extent = order_base_2(sq->extent);
 		qpi.state.sq_clog2_size = order_base_2(sq->entries);
 	}
@@ -500,9 +511,19 @@ static int sif_create_pma_qp(struct ib_pd *ibpd,
 	qp->port = init_attr->port_num;
 	sdev->pma_qp_idxs[qp->port - 1] = qp->qp_idx;
 
-	/* Make dfs and query_qp happy: */
+	/* Init ibqp side of things */
 	qp->ibqp.device = &sdev->ib_dev;
+	qp->ibqp.real_qp = &qp->ibqp;
+	qp->ibqp.uobject = NULL;
+	qp->ibqp.qp_type = IB_QPT_GSI;
+	atomic_set(&qp->ibqp.usecnt, 0);
+	qp->ibqp.event_handler = init_attr->event_handler;
+	qp->ibqp.qp_context = init_attr->qp_context;
+	qp->ibqp.recv_cq = init_attr->recv_cq;
+	qp->ibqp.srq = init_attr->srq;
 	qp->ibqp.pd = &sdev->pd->ibpd;
+	qp->ibqp.send_cq = init_attr->send_cq;
+	qp->ibqp.xrcd = NULL;
 
 	/* Set back IB_QPT_GSI */
 	init_attr->qp_type = IB_QPT_GSI;
@@ -663,19 +684,22 @@ struct ib_qp *sif_create_qp(struct ib_pd *ibpd,
 
 	if (udata) {
 		struct sif_create_qp_resp_ext resp;
-		struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+		struct sif_sq *sq = (init_attr->qp_type != IB_QPT_XRC_TGT) ?
+			get_sif_sq(sdev, qp->qp_idx) : NULL;
+		struct sif_rq *rq = get_rq(sdev, qp);
 		int rv;
 
 		memset(&resp, 0, sizeof(resp));
 		resp.qp_idx = qp->qp_idx;
-		resp.sq_extent = sq->extent;
-		resp.sq_sgl_offset = sq->sgl_offset;
-		resp.sq_mr_idx = sq->sg_mr ? sq->sg_mr->index : 0;
-		resp.sq_dma_handle = sif_mem_dma(sq->mem, 0);
-		if (init_attr->qp_type != IB_QPT_XRC_INI && init_attr->qp_type != IB_QPT_XRC_TGT) {
-			/* XRC qps do not have any rq */
-			struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
 
+		if (sq) {
+			resp.sq_extent = sq->extent;
+			resp.sq_sgl_offset = sq->sgl_offset;
+			resp.sq_mr_idx = sq->sg_mr ? sq->sg_mr->index : 0;
+			resp.sq_dma_handle = sif_mem_dma(sq->mem, 0);
+		}
+
+		if (rq) {
 			resp.rq_idx = qp->rq_idx;
 			resp.rq_extent = rq->extent;
 		}
@@ -717,9 +741,6 @@ enum sif_mqp_type sif_modify_qp_is_ok(struct sif_qp *qp, enum ib_qp_state cur_st
 	int ret;
 	enum rdma_link_layer ll = IB_LINK_LAYER_INFINIBAND;
 
-	/* PSIF treats XRC just as any other RC QP */
-	if (type == IB_QPT_XRC_INI || type == IB_QPT_XRC_TGT)
-		type = IB_QPT_RC;
 	ret = ((qp->type == PSIF_QP_TRANSPORT_MANSP1 || is_epsa_tunneling_qp(type)) ?
 		1 : ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll));
 	if (!ret)
@@ -792,7 +813,7 @@ int modify_qp_hw_wa_qp_retry(struct sif_dev *sdev, struct sif_qp *qp,
 	int ret = 0;
 
 	if (need_wa_3713 || need_wa_4074) {
-		if (qp->type != PSIF_QP_TRANSPORT_MANSP1)
+		if (qp->type != PSIF_QP_TRANSPORT_MANSP1 && !is_xtgt_qp(qp))
 			ret = pre_process_wa4074(sdev, qp);
 
 		if (ret) {
@@ -828,7 +849,7 @@ int modify_qp_hw_wa_qp_retry(struct sif_dev *sdev, struct sif_qp *qp,
 
 	qp->flags &= ~SIF_QPF_HW_OWNED;
 
-	if (qp->type != PSIF_QP_TRANSPORT_MANSP1)
+	if (qp->type != PSIF_QP_TRANSPORT_MANSP1 && !is_xtgt_qp(qp))
 		ret = post_process_wa4074(sdev, qp);
 
 	if (ret)
@@ -902,16 +923,17 @@ int modify_qp(struct sif_dev *sdev, struct sif_qp *qp,
 {
 	int ret = 0;
 	struct ib_qp *ibqp = &qp->ibqp;
+	struct sif_rq *rq = get_rq(sdev, qp);
+	struct sif_sq *sq = get_sq(sdev, qp);
 	enum ib_qp_state cur_state, new_state;
 	enum sif_mqp_type mqp_type = SIF_MQP_IGN;
 
 	sif_log(sdev, SIF_QP, "Enter: qpn %d qp_idx %d mask 0x%x",
 		ibqp->qp_num, qp->qp_idx, qp_attr_mask);
 
-	/* WA #622, RQ flush from error completion in userspace */
-	if (udata && is_regular_qp(qp)) {
+	/* WA for Bug 622, RQ flush from error completion in userspace */
+	if (udata) {
 		struct sif_modify_qp_ext cmd;
-		struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
 
 		ret = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
 		if (ret) {
@@ -922,15 +944,28 @@ int modify_qp(struct sif_dev *sdev, struct sif_qp *qp,
 
 		switch (cmd.flush) {
 		case FLUSH_RQ:
-			ret = sif_flush_rq(sdev, rq, qp, rq->entries);
-			if (ret)
-				sif_log(sdev, SIF_INFO, "failed to flush RQ %d",
-					rq->index);
+			if (unlikely(!rq)) {
+				ret = -EINVAL;
+				sif_log(sdev, SIF_INFO,
+					"flush requested for qp (type %s) with no rq defined",
+					string_enum_psif_qp_trans(qp->type));
+			} else {
+				ret = sif_flush_rq(sdev, rq, qp, rq->entries);
+				if (ret)
+					sif_log(sdev, SIF_INFO, "failed to flush RQ %d", rq->index);
+			}
 			return ret;
 		case FLUSH_SQ:
-			ret = post_process_wa4074(sdev, qp);
-			if (ret)
-				sif_log(sdev, SIF_INFO, "failed to flush SQ %d", qp->qp_idx);
+			if (unlikely(!sq)) {
+				ret = -EINVAL;
+				sif_log(sdev, SIF_INFO,
+					"flush requested for qp (type %s) with no sq defined",
+					string_enum_psif_qp_trans(qp->type));
+			} else {
+				ret = post_process_wa4074(sdev, qp);
+				if (ret)
+					sif_log(sdev, SIF_INFO, "failed to flush SQ %d", qp->qp_idx);
+			}
 			return ret;
 		default:
 			break;
@@ -944,6 +979,9 @@ int modify_qp(struct sif_dev *sdev, struct sif_qp *qp,
 
 	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
 
+	sif_log(sdev, SIF_QP, "qpn %d qp_idx %d requested state 0x%x cur state 0x%x",
+		ibqp->qp_num, qp->qp_idx, new_state, cur_state);
+
 	if (!fail_on_same_state && cur_state == qp_attr->qp_state) {
 		/* Silently ignore.. (used at destroy time) */
 		goto sif_mqp_ret;
 	}
@@ -997,9 +1035,7 @@ sif_mqp_ret:
 	 */
 	switch (new_state) {
 	case IB_QPS_ERR:
-		if (is_regular_qp(qp)) {
-			struct sif_rq *rq = get_sif_rq(sdev, qp->rq_idx);
-
+		if (rq) {
 			/* WA #3850:if SRQ, generate LAST_WQE event */
 			if (rq->is_srq && qp->ibqp.event_handler) {
 				struct ib_event ibe = {
@@ -1009,7 +1045,7 @@ sif_mqp_ret:
 				};
 
 				qp->ibqp.event_handler(&ibe, qp->ibqp.qp_context);
-			} else if (rq && !rq->is_srq) {
+			} else if (!rq->is_srq) {
 				/* WA #622: if reqular RQ, flush */
 				ret = sif_flush_rq(sdev, rq, qp, rq->entries);
 				if (ret) {
@@ -1329,6 +1365,16 @@ static int modify_qp_hw(struct sif_dev *sdev, struct sif_qp *qp,
 			qp->qp_idx, qp_attr->dest_qp_num);
 	}
 
+	/* PSIF requires additional attributes to transition XRC-QP to RTS */
+	if (is_xrc_qp(qp) && qp_attr->qp_state == IB_QPS_RTS) {
+		ctrl_attr->error_retry_count = 1;
+		mct->data.error_retry_count = 7;
+		ctrl_attr->rnr_retry_count = 1;
+		mct->data.rnr_retry_count = 7;
+		ctrl_attr->max_outstanding = 1;
+		mct->data.max_outstanding = 16;
+	}
+
 ok_modify_qp_sw:
 
 	/*
@@ -1373,10 +1419,8 @@ ok_modify_qp_sw:
 	if (ret)
 		goto err_modify_qp;
 
-	if (reliable_qp(qp->type)
-		&& (qp_attr_mask & IB_QP_STATE)) {
-		if ((qp->last_set_state == IB_QPS_INIT)
-			&& (qp_attr->qp_state == IB_QPS_RTR)) {
+	if (!is_xtgt_qp(qp) && is_reliable_qp(qp->type) && (qp_attr_mask & IB_QP_STATE)) {
+		if ((qp->last_set_state == IB_QPS_INIT) && (qp_attr->qp_state == IB_QPS_RTR)) {
 			/* Map the new send queue into the global sq_cmpl PSIF
 			 * only address map, see #944
 			 */
@@ -1386,8 +1430,7 @@ ok_modify_qp_sw:
 
 			qp->sq_cmpl_map_valid = true;
 
-		} else if ((qp->sq_cmpl_map_valid)
-			&& (qp_attr->qp_state == IB_QPS_RESET)) {
+		} else if ((qp->sq_cmpl_map_valid) && (qp_attr->qp_state == IB_QPS_RESET)) {
 			/* Unmap the send queue from the global sq_cmpl PSIF */
 			ret = sif_sq_cmpl_unmap_sq(sdev, get_sif_sq(sdev, qp->qp_idx));
 			if (ret)
@@ -1838,13 +1881,10 @@ static int sif_query_qp_sw(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	struct sif_dev *sdev = to_sdev(ibqp->device);
 	struct sif_qp *qp = to_sqp(ibqp);
 	volatile struct psif_qp *qps = &qp->d;
-	struct sif_rq *rq = NULL;
-	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+	struct sif_rq *rq = get_rq(sdev, qp);
+	struct sif_sq *sq = get_sq(sdev, qp);
 	int ret = 0;
 
-	if (qp->type != PSIF_QP_TRANSPORT_XRC)
-		rq = get_sif_rq(sdev, qp->rq_idx);
-
 	/* Mellanox almost completely ignores the mask on both
 	 * input and output and reports all attributes regardlessly..
 	 * as opposed to what man ibv_query_qp indicates.
@@ -1890,8 +1930,11 @@ static int sif_query_qp_sw(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		qp_init_attr->cap.max_recv_wr = rq->entries_user;
 		qp_init_attr->cap.max_recv_sge = rq->sg_entries;
 	}
-	qp_init_attr->cap.max_send_wr = sq->entries;
-	qp_init_attr->cap.max_send_sge = sq->sg_entries;
+
+	if (sq) {
+		qp_init_attr->cap.max_send_wr = sq->entries;
+		qp_init_attr->cap.max_send_sge = sq->sg_entries;
+	}
 	qp_init_attr->cap.max_inline_data = qp->max_inline_data;
 
 	/* TBD: What to do with this:
@@ -2001,8 +2044,8 @@ static int sif_query_qp_hw(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	int ret = 0;
 	struct sif_qp *qp = to_sqp(ibqp);
 	struct sif_dev *sdev = to_sdev(ibqp->device);
-	struct sif_rq *rq = NULL;
-	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+	struct sif_rq *rq = get_rq(sdev, qp);
+	struct sif_sq *sq = get_sq(sdev, qp);
 	struct psif_query_qp lqqp;
 
 	/* Take QP lock to avoid any race condition on updates to last_set_state: */
@@ -2016,9 +2059,6 @@ static int sif_query_qp_hw(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	if (ret)
 		return ret;
 
-	if (qp->type != PSIF_QP_TRANSPORT_XRC)
-		rq = get_sif_rq(sdev, qp->rq_idx);
-
 	/* Mellanox almost completely ignores the mask on both
 	 * input and output and reports all attributes regardlessly..
 	 * as opposed to what man ibv_query_qp indicates.
@@ -2064,8 +2104,11 @@ static int sif_query_qp_hw(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		qp_init_attr->cap.max_recv_wr = rq->entries_user;
 		qp_init_attr->cap.max_recv_sge = rq->sg_entries;
 	}
-	qp_init_attr->cap.max_send_wr = sq->entries;
-	qp_init_attr->cap.max_send_sge = sq->sg_entries;
+
+	if (sq) {
+		qp_init_attr->cap.max_send_wr = sq->entries;
+		qp_init_attr->cap.max_send_sge = sq->sg_entries;
+	}
 	qp_init_attr->cap.max_inline_data = qp->max_inline_data;
 
 	/* TBD: What to do with these..
@@ -2114,7 +2157,7 @@ int destroy_qp(struct sif_dev *sdev, struct sif_qp *qp)
 	struct ib_qp_attr mod_attr = {
 		.qp_state        = IB_QPS_RESET
 	};
-	struct sif_rq *rq = NULL;
+	struct sif_rq *rq = get_rq(sdev, qp);
 	bool reuse_ok = true;
 
 	/* See bug #3496 */
@@ -2132,9 +2175,6 @@ int destroy_qp(struct sif_dev *sdev, struct sif_qp *qp)
 
 	sif_log(sdev, SIF_QP, "## Enter qp_idx %d", index);
 
-	if (is_regular_qp(qp))
-		rq = get_sif_rq(sdev, qp->rq_idx);
-
 	/* make sure event handling is performed before reset the qp.*/
 	if (atomic_dec_and_test(&qp->refcnt))
 		complete(&qp->can_destroy);
@@ -2147,7 +2187,7 @@ int destroy_qp(struct sif_dev *sdev, struct sif_qp *qp)
 
 	if (!(qp->flags & SIF_QPF_USER_MODE)) {
 		int nfixup;
-		struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+		struct sif_sq *sq = get_sq(sdev, qp);
 		u32 cq_idx = get_psif_qp_core__rcv_cq_indx(&qp->d.state);
 		struct sif_cq *send_cq = (sq && sq->cq_idx >= 0) ? get_sif_cq(sdev, sq->cq_idx) : NULL;
 		struct sif_cq *recv_cq = rq ? get_sif_cq(sdev, cq_idx) : NULL;
@@ -2202,9 +2242,12 @@ fixup_failed:
 	sif_free_sq(sdev, qp);
 
 	if (rq) {
-		ret = free_rq(sdev, qp->rq_idx);
-		if (ret && (ret != -EBUSY || !rq->is_srq))
-			return ret;
+		if (rq->is_srq)
+			atomic_dec(&rq->refcnt);
+		else
+			ret = free_rq(sdev, qp->rq_idx);
+		if (ret && ret != -EBUSY)
+			return ret;
 	}
 
 	if (index > 3 && reuse_ok)
@@ -2220,8 +2263,8 @@ fixup_failed:
 static int reset_qp(struct sif_dev *sdev, struct sif_qp *qp)
 {
 	volatile struct psif_qp *qps = &qp->d;
-	struct sif_rq *rq = NULL;
-	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+	struct sif_rq *rq = get_rq(sdev, qp);
+	struct sif_sq *sq = get_sq(sdev, qp);
 	bool need_wa_3713 = 0;
 
 	/* Bring down order needed by rev2 according to bug #3480 */
@@ -2230,9 +2273,6 @@ static int reset_qp(struct sif_dev *sdev, struct sif_qp *qp)
 	if (ret)
 		goto failed;
 
-	if (is_regular_qp(qp))
-		rq = get_sif_rq(sdev, qp->rq_idx);
-
 	/* WA 3713 special handling */
 	need_wa_3713 = (PSIF_REVISION(sdev) <= 3) && IS_PSIF(sdev)
 		/* Next check if there is a retry outstanding */
@@ -2252,9 +2292,12 @@ static int reset_qp(struct sif_dev *sdev, struct sif_qp *qp)
 	/* if the send queue scheduler is running, wait for
 	 * it to terminate:
 	 */
-	ret = sif_flush_sqs(sdev, sq);
-	if (ret)
-		goto failed;
+	ret = 0;
+	if (qp->ibqp.qp_type != IB_QPT_XRC_TGT) {
+		ret = sif_flush_sqs(sdev, sq);
+		if (ret)
+			goto failed;
+	}
 
 	sif_logs(SIF_DUMP,
 		write_struct_psif_qp(NULL, 1, (struct psif_qp *)&qp->d));
@@ -2267,7 +2310,7 @@ failed:
 	}
 
 	/* Reset the SQ pointers */
-	if (!qp->ibqp.xrcd) {
+	if (!is_xtgt_qp(qp)) {
 		struct sif_sq_sw *sq_sw = get_sif_sq_sw(sdev, qp->qp_idx);
 
 		memset(sq_sw, 0, sizeof(*sq_sw));
diff --git a/drivers/infiniband/hw/sif/sif_qp.h b/drivers/infiniband/hw/sif/sif_qp.h
index 0ab36abd38040..81ed291bdbf73 100644
--- a/drivers/infiniband/hw/sif/sif_qp.h
+++ b/drivers/infiniband/hw/sif/sif_qp.h
@@ -158,10 +158,36 @@ static inline int psif_supported_trans(enum psif_qp_trans type)
 	return type != PSIF_QP_TRANSPORT_RSVD1;
 }
 
-static inline bool is_regular_qp(struct sif_qp *qp)
+static inline bool is_xini_qp(struct sif_qp *qp)
 {
-	return (qp->type != PSIF_QP_TRANSPORT_MANSP1 &&
-		qp->type != PSIF_QP_TRANSPORT_XRC);
+	return qp->ibqp.qp_type == IB_QPT_XRC_INI;
+}
+
+static inline bool is_xtgt_qp(struct sif_qp *qp)
+{
+	return qp->ibqp.qp_type == IB_QPT_XRC_TGT;
+}
+
+static inline bool is_xrc_qp(struct sif_qp *qp)
+{
+	return qp->type == PSIF_QP_TRANSPORT_XRC;
+}
+
+static inline bool is_reliable_qp(enum psif_qp_trans type)
+{
+	return type == PSIF_QP_TRANSPORT_RC || type == PSIF_QP_TRANSPORT_XRC;
+}
+
+static inline bool multipacket_qp(enum psif_qp_trans type)
+{
+	switch (type) {
+	case PSIF_QP_TRANSPORT_RC:
+	case PSIF_QP_TRANSPORT_UC:
+	case PSIF_QP_TRANSPORT_XRC:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static inline bool is_epsa_tunneling_qp(enum ib_qp_type type)
@@ -249,5 +275,7 @@ static inline bool ib_legal_path_mtu(enum ib_mtu mtu)
 	return (mtu >= IB_MTU_256) && (mtu <= IB_MTU_4096);
 }
 
+struct sif_sq *get_sq(struct sif_dev *sdev, struct sif_qp *qp);
+struct sif_rq *get_rq(struct sif_dev *sdev, struct sif_qp *qp);
 
 #endif
diff --git a/drivers/infiniband/hw/sif/sif_r3.c b/drivers/infiniband/hw/sif/sif_r3.c
index 5dce50c28ddf3..455025787cf27 100644
--- a/drivers/infiniband/hw/sif/sif_r3.c
+++ b/drivers/infiniband/hw/sif/sif_r3.c
@@ -465,7 +465,7 @@ static int outstanding_wqes(struct sif_dev *sdev, struct sif_qp *qp, u16 *head)
 
 int pre_process_wa4074(struct sif_dev *sdev, struct sif_qp *qp)
 {
-	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
+	struct sif_sq *sq = get_sq(sdev, qp);
 	struct psif_sq_entry *sqe;
 	u16 head;
 	int len;
@@ -473,6 +473,12 @@ int pre_process_wa4074(struct sif_dev *sdev, struct sif_qp *qp)
 	if (qp->flags & SIF_QPF_NO_EVICT)
 		return 0; /* do-not-evict QPs don't have any SQs */
 
+	if (unlikely(!sq)) {
+		sif_log(sdev, SIF_INFO, "sq not defined for qp %d (type %s)",
+			qp->qp_idx, string_enum_psif_qp_trans(qp->type));
+		return -1;
+	}
+
 	len = outstanding_wqes(sdev, qp, &head);
 	if (len <= 0)
 		return -1;
@@ -491,8 +497,8 @@ int pre_process_wa4074(struct sif_dev *sdev, struct sif_qp *qp)
 */
 int post_process_wa4074(struct sif_dev *sdev, struct sif_qp *qp)
 {
-	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
-	struct sif_sq_sw *sq_sw = get_sif_sq_sw(sdev, qp->qp_idx);
+	struct sif_sq *sq = get_sq(sdev, qp);
+	struct sif_sq_sw *sq_sw = sq ? get_sif_sq_sw(sdev, qp->qp_idx) : NULL;
 	struct psif_query_qp lqqp;
 	bool last_seq_set = false;
 	u16 last_seq, fence_seq;
@@ -500,8 +506,13 @@ int post_process_wa4074(struct sif_dev *sdev, struct sif_qp *qp)
 	int ret = 0;
 	bool need_gen_fence_completion = true;
 	struct sif_cq *cq = (sq && sq->cq_idx >= 0) ? get_sif_cq(sdev, sq->cq_idx) : NULL;
-	struct sif_cq_sw *cq_sw = get_sif_cq_sw(sdev, cq->index);
+	struct sif_cq_sw *cq_sw = cq ? get_sif_cq_sw(sdev, cq->index) : NULL;
 
+	if (unlikely(!sq || !cq)) {
+		sif_log(sdev, SIF_INFO, "sq/cq not defined for qp %d (type %s)",
+			qp->qp_idx, string_enum_psif_qp_trans(qp->type));
+		return -1;
+	}
 
 	/* if flush SQ is in progress, set FLUSH_SQ_IN_FLIGHT.
 	 */
@@ -772,7 +783,7 @@ err_sq_flush:
 static u16 walk_and_update_cqes(struct sif_dev *sdev, struct sif_qp *qp, u16 head, u16 end)
 {
 	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
-	struct sif_cq *cq = (sq && sq->cq_idx >= 0) ? get_sif_cq(sdev, sq->cq_idx) : NULL;
+	struct sif_cq *cq = sq->cq_idx >= 0 ? get_sif_cq(sdev, sq->cq_idx) : NULL;
 	struct sif_cq_sw *cq_sw = get_sif_cq_sw(sdev, cq->index);
 	volatile struct psif_cq_entry *cqe;
 	u16 last_seq = 0, updated_seq;
@@ -833,7 +844,7 @@ static u16 walk_and_update_cqes(struct sif_dev *sdev, struct sif_qp *qp, u16 hea
 static u16 cq_walk_wa4074(struct sif_dev *sdev, struct sif_qp *qp, bool *last_seq_set)
 {
 	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
-	struct sif_cq *cq = (sq && sq->cq_idx >= 0) ? get_sif_cq(sdev, sq->cq_idx) : NULL;
+	struct sif_cq *cq = sq->cq_idx >= 0 ? get_sif_cq(sdev, sq->cq_idx) : NULL;
 	struct sif_cq_sw *cq_sw = get_sif_cq_sw(sdev, cq->index);
 	volatile struct psif_cq_entry *cqe;
 	u32 seqno, polled_value;
diff --git a/drivers/infiniband/hw/sif/sif_sndrcv.c b/drivers/infiniband/hw/sif/sif_sndrcv.c
index c2afdab16da0f..9d62eaa937775 100644
--- a/drivers/infiniband/hw/sif/sif_sndrcv.c
+++ b/drivers/infiniband/hw/sif/sif_sndrcv.c
@@ -37,8 +37,8 @@ int sif_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct sif_dev *sdev = to_sdev(ibqp->device);
 	struct sif_qp *qp = to_sqp(ibqp);
-	struct sif_sq *sq = get_sif_sq(sdev, qp->qp_idx);
-	struct sif_sq_sw *sq_sw = get_sif_sq_sw(sdev, qp->qp_idx);
+	struct sif_sq *sq = get_sq(sdev, qp);
+	struct sif_sq_sw *sq_sw = sq ? get_sif_sq_sw(sdev, qp->qp_idx) : NULL;
 	unsigned long flags;
 	bool doorbell_mode;
 	bool last;
@@ -47,6 +47,12 @@ int sif_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	int ret = 0;
 	int n;
 
+	if (unlikely(!sq)) {
+		sif_log(sdev, SIF_INFO, "sq not defined for qp %d (type %s)",
+			qp->qp_idx, string_enum_psif_qp_trans(qp->type));
+		return -EINVAL;
+	}
+
 	sif_log(sdev, SIF_SND, "on qp_idx %d wr 0x%p ibv type %d",
 		qp->qp_idx, wr, wr->opcode);
 
@@ -617,14 +623,13 @@ static int get_gsi_qp_idx(struct sif_qp *qp)
 int sif_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		  struct ib_recv_wr **bad_wr)
 {
-	struct sif_qp *qp = to_sqp(ibqp);
-	struct sif_rq *rq;
 	struct sif_dev *sdev = to_sdev(ibqp->device);
 	struct sif_eps *es = &sdev->es[sdev->mbox_epsc];
+	struct sif_qp *qp = to_sqp(ibqp);
+	struct sif_rq *rq = NULL;
 	bool need_pma_pxy_qp = eps_version_ge(es, 0, 57)
 		&& (qp->qp_idx == 1 || qp->qp_idx == 3);
 
-
 	sif_log(sdev, SIF_RCV, "Enter: wr_id 0x%llx qp_idx %d",
 		wr->wr_id, qp->qp_idx);
 
@@ -634,13 +639,18 @@ int sif_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			wr->wr_id, qp->qp_idx);
 	}
 
+	rq = get_rq(sdev, qp);
+	if (unlikely(!rq)) {
+		sif_log(sdev, SIF_INFO, "rq not defined for qp_idx %d (type %s)",
+			qp->qp_idx, string_enum_psif_qp_trans(qp->type));
+		return -EINVAL;
+	}
+
 	if (qp->last_set_state == IB_QPS_RESET) {
 		sif_log(sdev, SIF_INFO, "Invalid QP state (IB_QPS_RESET)");
 		return -EINVAL;
 	}
 
-	rq = get_sif_rq(sdev, qp->rq_idx);
-
 	if (wr->num_sge > rq->sg_entries) {
 		sif_log(sdev, SIF_INFO, "qp only supports %d receive sg entries - wr has %d",
 			rq->sg_entries, wr->num_sge);
@@ -741,7 +751,7 @@ err_post_recv:
 	*bad_wr = wr;
 
 	/* WA #622, Check if QP in ERROR, flush RQ */
-	if (!rq->is_srq && is_regular_qp(qp) && qp->last_set_state == IB_QPS_ERR) {
+	if (!rq->is_srq && qp->last_set_state == IB_QPS_ERR) {
 		if (sif_flush_rq(sdev, rq, qp, atomic_read(&rq_sw->length)))
 			sif_log(sdev, SIF_INFO, "failed to flush RQ %d", rq->index);
 	}
diff --git a/drivers/infiniband/hw/sif/sif_sq.c b/drivers/infiniband/hw/sif/sif_sq.c
index 2d5bcd26e5327..c8cd686eb022f 100644
--- a/drivers/infiniband/hw/sif/sif_sq.c
+++ b/drivers/infiniband/hw/sif/sif_sq.c
@@ -19,6 +19,7 @@
 #include "sif_mmu.h"
 #include "sif_pt.h"
 #include "sif_mr.h"
+#include "sif_qp.h"
 #include "sif_sq.h"
 #include "sif_hwi.h"
 #include "psif_hw_setget.h"
@@ -157,7 +158,7 @@ int sif_alloc_sq(struct sif_dev *sdev, struct sif_pd *pd,
 	 * For simplicity we impose the same for reliable QPs as their SQs
 	 * have to be page aligned to ensure proper access from SQ_CMPL:
 	 */
-	need_page_aligned = user_mode || reliable_qp(qp->type);
+	need_page_aligned = user_mode || is_reliable_qp(qp->type);
 
 	if (need_page_aligned && (alloc_sz & ~PAGE_MASK))
 		alloc_sz = (alloc_sz + ~PAGE_MASK) & PAGE_MASK;
@@ -333,25 +334,25 @@ int sif_flush_sqs(struct sif_dev *sdev, struct sif_sq *sq)
 
 void sif_free_sq(struct sif_dev *sdev, struct sif_qp *qp)
 {
-	struct sif_sq *sq;
+	struct sif_sq *sq = get_sq(sdev, qp);
 	volatile struct psif_sq_hw *sq_hw_p;
 	volatile struct psif_sq_sw *sq_sw_p;
-	int index = qp->qp_idx;
 
+	if (is_xtgt_qp(qp))
+		return;
 
-	sq = get_sif_sq(sdev, index);
 	sif_log(sdev, SIF_SQ, "idx %d", sq->index);
 
-	sq_sw_p = get_sq_sw(sdev, index);
+	sq_sw_p = get_sq_sw(sdev, qp->qp_idx);
 	sq_hw_p = &sq->d;
 
-	if (reliable_qp(qp->type) && qp->sq_cmpl_map_valid)
+	if (is_reliable_qp(qp->type) && qp->sq_cmpl_map_valid)
 		sif_sq_cmpl_unmap_sq(sdev, sq);
 
 	sif_unmap_ctx(sdev, &sq->mmu_ctx);
 
 	/* We clear the whole sq field including sq_hw below */
-	sif_clear_sq_sw(sdev, index);
+	sif_clear_sq_sw(sdev, qp->qp_idx);
 
 	if (sq->sg_mr)
 		dealloc_mr(sdev, sq->sg_mr);
@@ -480,7 +481,7 @@ void sif_dfs_print_sq_cmpl(struct seq_file *s, struct sif_dev *sdev, loff_t pos)
 		return;
 
 	/* Only QPs with multipacket support is mapped here; */
-	if (!reliable_qp(qp->type))
+	if (!is_reliable_qp(qp->type))
 		return;
 
 	if (sif_pt_entry(ctx->pt, virt_base, &dma_start, &val))
@@ -495,24 +496,3 @@ void sif_dfs_print_sq_cmpl(struct seq_file *s, struct sif_dev *sdev, loff_t pos)
 	}
 	seq_puts(s, "]\n");
 }
-
-
-bool multipacket_qp(enum psif_qp_trans type)
-{
-	switch (type) {
-	case PSIF_QP_TRANSPORT_RC:
-	case PSIF_QP_TRANSPORT_UC:
-	case PSIF_QP_TRANSPORT_XRC:
-		return true;
-	default:
-		return false;
-	}
-}
-
-
-bool reliable_qp(enum psif_qp_trans type)
-{
-	return
-		type == PSIF_QP_TRANSPORT_RC ||
-		type == PSIF_QP_TRANSPORT_XRC;
-}
diff --git a/drivers/infiniband/hw/sif/sif_sq.h b/drivers/infiniband/hw/sif/sif_sq.h
index 6d0cc306f4c37..e53ac2afaa6d5 100644
--- a/drivers/infiniband/hw/sif/sif_sq.h
+++ b/drivers/infiniband/hw/sif/sif_sq.h
@@ -67,7 +67,4 @@ int sif_sq_cmpl_unmap_sq(struct sif_dev *sdev, struct sif_sq *sq);
 void sif_dfs_print_sq_hw(struct seq_file *s, struct sif_dev *sdev, loff_t pos);
 void sif_dfs_print_sq_cmpl(struct seq_file *s, struct sif_dev *sdev, loff_t pos);
 
-bool multipacket_qp(enum psif_qp_trans type);
-bool reliable_qp(enum psif_qp_trans type);
-
 #endif
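
Sketch of the user-space XRC object model the note above refers to: a
minimal sketch assuming the standard libibverbs XRC extension API.
Queue sizes and the xrcd open flags are placeholders, error handling
is elided, and nothing here is PSIF-specific.

#include <fcntl.h>
#include <infiniband/verbs.h>

/* One XRC domain, shared by all consumers of the same XRCSRQ.
 * fd = -1 with O_CREAT gives a domain private to this process. */
static struct ibv_xrcd *xrc_open_domain(struct ibv_context *ctx)
{
	struct ibv_xrcd_init_attr xa = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD | IBV_XRCD_INIT_ATTR_OFLAGS,
		.fd = -1,
		.oflags = O_CREAT,
	};
	return ibv_open_xrcd(ctx, &xa);
}

/* The XRCSRQ: a single receive queue that any number of XRC TGT QPs
 * in the same domain can deliver into. This sharing is why a receive
 * completion must identify the XRCSRQ and not just a QP#. */
static struct ibv_srq *xrc_create_srq(struct ibv_context *ctx, struct ibv_pd *pd,
				      struct ibv_xrcd *xrcd, struct ibv_cq *cq)
{
	struct ibv_srq_init_attr_ex sa = {
		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_XRCD |
			     IBV_SRQ_INIT_ATTR_CQ | IBV_SRQ_INIT_ATTR_PD,
		.srq_type = IBV_SRQT_XRC,
		.pd = pd,
		.xrcd = xrcd,
		.cq = cq,
		.attr = { .max_wr = 64, .max_sge = 1 },	/* placeholder sizes */
	};
	return ibv_create_srq_ex(ctx, &sa);
}

/* An XRC TGT QP (IBV_QPT_XRC_RECV): it owns neither an SQ nor an RQ,
 * which is what get_sq()/get_rq() returning NULL models on the
 * kernel side of this patch. */
static struct ibv_qp *xrc_create_tgt_qp(struct ibv_context *ctx,
					struct ibv_xrcd *xrcd)
{
	struct ibv_qp_init_attr_ex qa = {
		.comp_mask = IBV_QP_INIT_ATTR_XRCD,
		.qp_type = IBV_QPT_XRC_RECV,
		.xrcd = xrcd,
	};
	return ibv_create_qp_ex(ctx, &qa);
}

Because several such TGT QPs can feed one SRQ, MLX hardware reports
the XRCSRQ# in wc.src_qp when polling the completion queue; with the
limitation described above, sif instead relies on each user context
owning at most one XRCTGTQP/XSRQ pair.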