last_seq, last_gen_seq);
for (; (!GREATER_16(last_seq, last_gen_seq)); ++last_seq) {
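+ /* Overflow guard (sketch of the reasoning, assuming
+  * sq_sw->head_seq tracks the last sequence number already polled):
+  * last_seq - head_seq is the number of CQ entries this
+  * completion would leave in use.
+  */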
+ if (unlikely(cq->entries < ((u32) (last_seq - sq_sw->head_seq)))) {
+ sif_log(sdev, SIF_INFO, "cq (%d) is full! (len = %d, used = %d)",
+ cq->index, cq->entries, last_seq - sq_sw->head_seq - 1);
+ goto err_post_wa4074;
+ }
+
sif_log(sdev, SIF_WCE_V, "generate completion %x",
last_seq);
u32 head, tail;
unsigned long flags;
enum sif_mqp_type mqp_type = SIF_MQP_SW;
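+ /* rq can be NULL on this path, so the CQ lookup below is guarded;
+  * code that dereferences cq later runs only after rq has been
+  * validated (assumption based on the surrounding flush-RQ logic).
+  */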
+ struct sif_cq *cq = rq ? get_sif_cq(sdev, rq->cq_idx) : NULL;
DECLARE_SIF_CQE_POLL(sdev, lcqe);
/* if flush RQ is in progress, set FLUSH_RQ_IN_FLIGHT. */
if ((lqps.state.expected_opcode != NO_OPERATION_IN_PROGRESS) &&
(lqps.state.committed_received_psn + 1 == lqps.state.expected_psn)) {
int entries;
- struct sif_cq *cq = get_sif_cq(sdev, lqps.state.rcv_cq_indx);
struct sif_cq_sw *cq_sw;
unsigned long timeout;
* these give no pqp completions but may in theory fail
*/
while (real_len > 0) {
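+ /* Same overflow guard for the flush path: rq_sw->length is the
+  * number of RQ entries still outstanding, each of which gets a
+  * flush completion on this CQ. cq is taken to be non-NULL here,
+  * given the guarded initialization above (assumption).
+  */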
+ if (unlikely(cq->entries < ((u32) atomic_read(&rq_sw->length)))) {
+ sif_log(sdev, SIF_INFO, "cq (%d) is full! (len = %d, used = %d)",
+ cq->index, cq->entries, atomic_read(&rq_sw->length));
+ goto free_rq_error;
+ }
+
sif_log(sdev, SIF_PQP, "rq %d, len %d", rq->index, real_len);
+
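+ /* Post one flush completion for the entry at head; failures are
+  * logged rather than aborting the loop (assumption based on the
+  * log-and-continue pattern below).
+  */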
ret = sif_gen_rq_flush_cqe(sdev, rq, head, target_qp);
if (ret)
sif_log(sdev, SIF_INFO, "rq %d, len %d, sif_gen_rq_flush_cqe returned %d",