sif: sq/rq: Do not generate completion if target CQ is full
author Wei Lin Guay <wei.lin.guay@oracle.com>
Fri, 5 Aug 2016 13:02:07 +0000 (15:02 +0200)
committer Santosh Shilimkar <santosh.shilimkar@oracle.com>
Fri, 12 Aug 2016 19:18:11 +0000 (12:18 -0700)
Orabug: 24378690

The SIF driver needs to generate FLUSHED-IN-ERR completions via the pqp
during the QP tear-down phase. Nevertheless, a faulty application, or an
application that does not consume its completions (e.g. ibv_*pingpong),
may cause the pqp to generate a completion to an already full CQ. The
pqp then transitions to the ERR state, which eventually crashes the
system. This patch checks whether the target CQ is full and bails out
instead of posting to it, preventing the crash.

Signed-off-by: Wei Lin Guay <wei.lin.guay@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Knut Omang <knut.omang@oracle.com>
drivers/infiniband/hw/sif/sif_r3.c
drivers/infiniband/hw/sif/sif_rq.c
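
Both hunks below apply the same guard: before the pqp generates a flush
completion, check that the target CQ still has room for it. A minimal
sketch of the idea in kernel-style C, using illustrative names
(cq_capacity, cqes_needed, gen_flush_cqe_checked) rather than the
driver's real structures (struct sif_cq, sif_sq_sw, sif_rq_sw):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>

/*
 * Illustrative sketch only, not the driver's real API; the patch below
 * performs the equivalent check inline in sif_r3.c and sif_rq.c.
 */
static int gen_flush_cqe_checked(u32 cq_capacity, u32 cqes_needed)
{
	/*
	 * Posting more completions than the target CQ can hold would
	 * push the pqp into the ERR state, so bail out instead.
	 */
	if (unlikely(cq_capacity < cqes_needed))
		return -ENOMEM;

	/* ...generate the FLUSHED-IN-ERR completion via the pqp here... */
	return 0;
}

In the first hunk the "used" count comes from the send queue sequence
numbers (last_seq - sq_sw->head_seq); in the second it is the receive
queue length (atomic_read(&rq_sw->length)).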

index b9a996ec12ce2c76af9b7487351da38ce7b7a98b..133f952a31d75bfc34569b6fe7964ce1640415bf 100644 (file)
@@ -694,6 +694,12 @@ flush_sq_again:
                last_seq, last_gen_seq);
 
        for (; (!GREATER_16(last_seq, last_gen_seq)); ++last_seq) {
+               if (unlikely(cq->entries < ((u32) (last_seq - sq_sw->head_seq)))) {
+                       sif_log(sdev, SIF_INFO, "cq (%d) is full! (len = %d, used = %d)",
+                               cq->index, cq->entries, last_seq - sq_sw->head_seq - 1);
+                       goto err_post_wa4074;
+               }
+
                sif_log(sdev, SIF_WCE_V, "generate completion %x",
                        last_seq);
 
index 387192695c8ed9fe2f808a3ffd7d2a4d4e6e5743..8cad3c75fd2870e27bc64a548b230d1ce81aa774 100644 (file)
@@ -336,6 +336,7 @@ static void sif_flush_rq(struct work_struct *work)
        u32 head, tail;
        unsigned long flags;
        enum sif_mqp_type mqp_type = SIF_MQP_SW;
+       struct sif_cq *cq = rq ? get_sif_cq(sdev, rq->cq_idx) : NULL;
        DECLARE_SIF_CQE_POLL(sdev, lcqe);
 
        /* if flush RQ is in progress, set FLUSH_RQ_IN_FLIGHT.
@@ -468,7 +469,6 @@ flush_rq_again:
                                if ((lqps.state.expected_opcode != NO_OPERATION_IN_PROGRESS) &&
                                    (lqps.state.committed_received_psn + 1 == lqps.state.expected_psn)) {
                                        int entries;
-                                       struct sif_cq *cq = get_sif_cq(sdev, lqps.state.rcv_cq_indx);
                                        struct sif_cq_sw *cq_sw;
                                        unsigned long timeout;
 
@@ -514,7 +514,14 @@ flush_rq_again:
                 * these give no pqp completions but may in theory fail
                 */
                while (real_len > 0) {
+                       if (unlikely(cq->entries < ((u32) atomic_read(&rq_sw->length)))) {
+                               sif_log(sdev, SIF_INFO, "cq (%d) is full! (len = %d, used = %d)",
+                                       cq->index, cq->entries, atomic_read(&rq_sw->length));
+                               goto free_rq_error;
+                       }
+
                        sif_log(sdev, SIF_PQP, "rq %d, len %d", rq->index, real_len);
+
                        ret = sif_gen_rq_flush_cqe(sdev, rq, head, target_qp);
                        if (ret)
                                sif_log(sdev, SIF_INFO, "rq %d, len %d, sif_gen_rq_flush_cqe returned %d",