struct sif_sq *sq;
struct psif_cq_entry *cqe;
bool self_destruct;
+ bool dyn_lcqe = false;
struct sif_dev *sdev = to_sdev(pqp->qp->ibqp.device);
self_destruct = (type == cq_hw) && (index == pqp->cq->index);
sts = sif_pqp_write_send(pqp, &wr, NULL, p_mode);
if (sts != -EAGAIN)
return sts;
- /* In the EAGAIN case, fall through to post a new request with completion
- * to be able to use the quota beyond lowpri_lim
+ /* In the EAGAIN case, post a new (synchronous) request with completion
+ * to be able to use the quota beyond lowpri_lim.
+ * Note that lcqe is NULL here, so we must dynamically allocate and
+ * initialize one:
*/
+ BUG_ON(lcqe);
+ sif_log(sdev, SIF_INFO_V, "pqp %d: async post converted to sync due to an almost full PQP",
+ index);
+ lcqe = kzalloc(sizeof(*lcqe), GFP_KERNEL);
+ if (!lcqe)
+ return -ENOMEM;
+ /* Mirror the initialization done by DECLARE_SIF_CQE_POLL;
+ * kzalloc has already zeroed the remaining fields:
+ */
+ lcqe->cqe.status = PSIF_WC_STATUS_FIELD_MAX;
+ lcqe->pqp = get_pqp(sdev);
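+ /* Flag that this lcqe was allocated here and must be freed once the
+ * completion has been consumed:
+ */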
+ dyn_lcqe = true;
}
wr.completion = 1;
+ /* Don't leak a dynamically allocated lcqe on this exit path: */
+ if (dyn_lcqe)
+ kfree(lcqe);
return ncompleted;
}
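+ /* The caller passed lcqe == NULL and has no use for the completion
+ * itself; release the CQE allocated above and report success:
+ */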
+ if (dyn_lcqe) {
+ kfree(lcqe);
+ return 0;
+ }
+
/* Note that we operate on 3 different indices here! */
cqe = &lcqe->cqe;
pqp_sq_idx = pqp->qp->qp_idx;
if (sq_next != prev_sq_next) {
/* Reset timeout */
timeout = jiffies + sdev->min_resp_ticks * 2;
- sif_log(sdev, SIF_INFO_V, "sq %d: sq_next moved from %d -> %d",
+ sif_log(sdev, SIF_SQ, "sq %d: sq_next moved from %d -> %d",
sq->index, prev_sq_next, sq_next);
} else if (time_is_before_jiffies(timeout)) {
if (sif_feature(pcie_trigger))