return -EFAULT;
}
if (unlikely(!wh->used)) {
- if (sq_seq_num == wh->sq_seq)
- sif_log(sdev, SIF_INFO,
+ if (sq_seq_num == wh->sq_seq) {
+ sif_log(sdev, SIF_WCE,
"dupl cqe 0x%x for cq %d: got sq_seq 0x%x, last exp.0x%x, sts %d opc 0x%x",
cqe->seq_num, cq->index, sq_seq_num, wh->sq_seq,
cqe->status, cqe->opcode);
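+ /* Report the duplicate with -EIO; the poll loop absorbs it */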
- else
+ return -EIO;
+ } else
sif_log(sdev, SIF_INFO,
"unexp. cqe 0x%x for cq %d: got sq_seq 0x%x, last exp.0x%x, sts %d opc 0x%x",
cqe->seq_num, cq->index, sq_seq_num, wh->sq_seq,
cqe->status, cqe->opcode);
return -EFAULT;
}
if (unlikely(wh->sq_seq != sq_seq_num)) {
- sif_log(sdev, SIF_INFO,
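+ /* A duplicated completion on a wrapped SQ trails the stale
+ * handle by exactly one queue length.
+ */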
+ bool duplicate_comp_wrap_case = (wh->sq_seq - sq_seq_num == sq->entries);
+ int log_level = duplicate_comp_wrap_case ? SIF_WCE : SIF_INFO;
+ sif_log(sdev, log_level,
"wrong cqe 0x%x for cq %d: got sq_seq 0x%x, expected 0x%x, sts %d opc 0x%x",
cqe->seq_num, cq->index, sq_seq_num, wh->sq_seq, cqe->status, cqe->opcode);
- return -EFAULT;
+ return duplicate_comp_wrap_case ? -EIO : -EFAULT;
}
*wr_id = wh->wr_id;
wh->used = false;
wc->wr_id = cqe->wc_id.rq_id;
/* No more work, when QP is gone */
- return 0;
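+ /* Even with the QP gone, a CQE tagged as a HW duplicate
+ * must still report -EIO so it can be absorbed.
+ */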
+ return cqe->status == PSIF_WC_STATUS_DUPL_COMPL_ERR ? -EIO : 0;
}
ret = translate_wr_id(&wc->wr_id, sdev, cq, sq, cqe, sq_seq_num, cqe->qp);
/* If a send completion, handle the wr_id */
ret = translate_wr_id(&wr_id_host_order, sdev, cq, sq, &lcqe,
lcqe.wc_id.sq_id.sq_seq_num, lcqe.qp);
- if (ret)
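+ /* Tag the CQE itself so later handling still recognizes
+ * the duplicate by its status field.
+ */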
+ if (ret == -EIO)
+ set_psif_cq_entry__status(cqe, PSIF_WC_STATUS_DUPL_COMPL_ERR);
+ else if (ret)
goto err;
set_psif_cq_entry__wc_id(cqe, wr_id_host_order);
polled_value = get_psif_cq_entry__seq_num(cqe);
- if ((test_bit(CQ_POLLING_IGNORED_SEQ, &cq_sw->flags)) && ~seqno == polled_value) {
- seqno = ++cq_sw->next_seq;
- clear_bit(CQ_POLLING_IGNORED_SEQ, &cq_sw->flags);
- continue;
- }
-
if (seqno == polled_value)
npolled++;
else
if (likely(wc)) {
ret = handle_wc(sdev, cq, cqe, wc);
- if (ret < 0)
+ if (unlikely(ret == -EIO)) {
+ /* -EIO indicates that this is the duplicate
+ * FLUSH-IN-ERR completion generated by the HW.
+ */
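+ /* Consume it without surfacing a WC: step past its
+ * seq number and take it back out of the polled count.
+ */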
+ seqno = ++cq_sw->next_seq;
+ npolled--;
+ ret = 0;
+ continue;
+ } else if (ret < 0)
goto handle_failed;
wc++;
seqno = ++cq_sw->next_seq;
return IB_WC_RESP_TIMEOUT_ERR;
case PSIF_WC_STATUS_GENERAL_ERR:
return IB_WC_GENERAL_ERR;
+ /* A software defined state to indicate duplicate completion
+ * generated by HW.
+ *
+ * case PSIF_WC_STATUS_DUPL_COMPL_ERR:
+ */
case PSIF_WC_STATUS_FIELD_MAX:
return -1;
}
}
#define GREATER_16(a, b) ((s16)((s16)(a) - (s16)(b)) > 0)
+#define LESS_OR_EQUAL_16(a, b) (!(GREATER_16((a), (b))))
+#define PSIF_WC_STATUS_DUPL_COMPL_ERR (PSIF_WC_STATUS_FIELD_MAX - 1)
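+/*
+ * Serial-number arithmetic over u16: GREATER_16(0x0001, 0xfffe) is true
+ * since (s16)(0x0001 - 0xfffe) == 3, while GREATER_16(0x8001, 0x0001) is
+ * false because the signed 16-bit difference wraps negative. Sequence
+ * numbers thus compare correctly as long as they stay within 2^15 of
+ * each other.
+ */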
#define XFILE struct xchar
enum sq_sw_state {
FLUSH_SQ_IN_PROGRESS = 0,
FLUSH_SQ_IN_FLIGHT = 1,
+ FLUSH_SQ_FIRST_TIME = 2,
};
struct sif_sq_sw {
__u16 head_seq; /* Last sq seq.number seen in a compl (req. cq->lock) */
__u16 trusted_seq; /* Last next_seq that was either generated or exists in the cq */
__u8 tsl; /* Valid after transition to RTR */
+ bool need_flush; /* user level flag to indicate SQ needs flushing */
unsigned long flags; /* Flags, using unsigned long due to test_set/test_and_set_bit */
};
enum cq_sw_state {
CQ_POLLING_NOT_ALLOWED = 0,
- CQ_POLLING_IGNORED_SEQ = 1,
- FLUSH_SQ_FIRST_TIME = 2,
};
struct sif_cq_sw {
}
/* Restore QP SW state to ERROR */
qp->last_set_state = qp->tracked_state = IB_QPS_ERR;
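+ /* Hand the flush duty to user level via need_flush */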
+ if (qp->flags & SIF_QPF_USER_MODE) {
+ struct sif_sq *sq = get_sq(sdev, qp);
+ struct sif_sq_sw *sq_sw = sq ? get_sif_sq_sw(sdev, qp->qp_idx) : NULL;
+
+ if (sq_sw)
+ sq_sw->need_flush = true;
+ }
}
qp->flags &= ~SIF_QPF_HW_OWNED;
}
return ret;
case FLUSH_SQ:
+ sif_log(sdev, SIF_WCE_V, "user trying to flush SQ %d", qp->qp_idx);
+
if (unlikely(!sq)) {
ret = -EINVAL;
sif_log(sdev, SIF_INFO,
}
copy_conv_to_sw(&lqqp, &qp->d, sizeof(lqqp));
- last_seq = sq_sw->last_seq;
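+ /* last_seq may be updated concurrently; take one stable snapshot */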
+ last_seq = READ_ONCE(sq_sw->last_seq);
set_bit(CQ_POLLING_NOT_ALLOWED, &cq_sw->flags);
* completed before generating a sq_flush_cqe.
*/
spin_lock_irqsave(&sq->lock, flags);
- last_gen_seq = sq_sw->last_seq;
+ last_gen_seq = READ_ONCE(sq_sw->last_seq);
spin_unlock_irqrestore(&sq->lock, flags);
sif_log(sdev, SIF_WCE_V, "generate completion from %x to %x",
last_seq, last_gen_seq);
- for (; (!GREATER_16(last_seq, last_gen_seq)); ++last_seq) {
+ for (; (LESS_OR_EQUAL_16(last_seq, last_gen_seq)); ++last_seq) {
if (unlikely(cq->entries < ((u32)(last_seq - sq_sw->head_seq)))) {
sif_log(sdev, SIF_INFO, "cq (%d) is full! (len = %d, used = %d)",
cq->index, cq->entries, last_seq - sq_sw->head_seq - 1);
sq_sw->trusted_seq = last_seq;
check_in_flight_and_return:
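+ /* More requests may have been posted while completions were
+ * generated; if last_seq fell behind again, flush once more.
+ */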
+ last_gen_seq = READ_ONCE(sq_sw->last_seq);
+
+ if (LESS_OR_EQUAL_16(last_seq, last_gen_seq))
+ goto flush_sq_again;
+
+
if (test_and_clear_bit(FLUSH_SQ_IN_FLIGHT, &sq_sw->flags))
goto flush_sq_again;
last_seq = lcqe.wc_id.sq_id.sq_seq_num;
sif_log(sdev, SIF_WCE_V, "last_seq %x updated_seq %x lcqe.seq_num %x",
last_seq, updated_seq, lcqe.seq_num);
- if (last_seq != updated_seq) {
+ if (last_seq != updated_seq)
lcqe.wc_id.sq_id.sq_seq_num = updated_seq;
- if (GREATER_16(updated_seq, end)) {
- /* A scenario might be that an additional CQE
- * must be generated to flush all the HW
- * generated completions. Thus, ignore the polling of the cqe.
- */
- lcqe.seq_num = ~lcqe.seq_num;
- sif_log(sdev, SIF_WCE_V, "corrupt: lcqe.seq_num %x",
- lcqe.seq_num);
- set_bit(CQ_POLLING_IGNORED_SEQ, &cq_sw->flags);
- }
- copy_conv_to_hw(cqe, &lcqe, sizeof(lcqe));
- }
- if (!GREATER_16(updated_seq, end))
+
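+ /* A trailing HW-generated CQE may reach past the flush range;
+ * clamp its sq_seq so nothing beyond end is reported.
+ */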
+ if (GREATER_16(updated_seq, end))
+ lcqe.wc_id.sq_id.sq_seq_num = end;
+
+ copy_conv_to_hw(cqe, &lcqe, sizeof(lcqe));
+
+ if (LESS_OR_EQUAL_16(updated_seq, end))
updated_seq++;
++n;
}
*
*/
#define SIF_UVERBS_ABI_MAJOR_VERSION 3
-#define SIF_UVERBS_ABI_MINOR_VERSION 4
+#define SIF_UVERBS_ABI_MINOR_VERSION 5
#define SIF_UVERBS_VERSION(x, y) ((x) << 8 | (y))