/* Queue pairs */
 
+/*
+ * __ib_qp_event_handler() - per-QP async event interposer.
+ *
+ * Installed as the QP's event handler so the core can observe the
+ * IB_EVENT_QP_LAST_WQE_REACHED event (needed by the SRQ drain logic)
+ * before forwarding every event to the handler the consumer registered
+ * at QP creation time, if any.
+ *
+ * Note: the QP is taken from event->element.qp, not from @context.
+ */
+static void __ib_qp_event_handler(struct ib_event *event, void *context)
+{
+       struct ib_qp *qp = event->element.qp;
+
+       /* Wake up a waiter in __ib_drain_srq(), if one is blocked. */
+       if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
+               complete(&qp->srq_completion);
+       /* Forward the event to the consumer's original handler. */
+       if (qp->registered_event_handler)
+               qp->registered_event_handler(event, qp->qp_context);
+}
+
 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 {
        struct ib_qp *qp = context;
        qp->qp_type = attr->qp_type;
        qp->rwq_ind_tbl = attr->rwq_ind_tbl;
        qp->srq = attr->srq;
-       qp->event_handler = attr->event_handler;
+       qp->event_handler = __ib_qp_event_handler;
+       qp->registered_event_handler = attr->event_handler;
        qp->port = attr->port_num;
        qp->qp_context = attr->qp_context;
 
        spin_lock_init(&qp->mr_lock);
        INIT_LIST_HEAD(&qp->rdma_mrs);
        INIT_LIST_HEAD(&qp->sig_mrs);
+       init_completion(&qp->srq_completion);
 
        qp->send_cq = attr->send_cq;
        qp->recv_cq = attr->recv_cq;
                wait_for_completion(&rdrain.done);
 }
 
+/*
+ * __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
+ *                    expires.
+ * @qp:               queue pair associated with SRQ to drain
+ *
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State.  The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ *       drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ *       to be empty or the number of Poll CQ operations has exceeded
+ *       CQ capacity size;
+ * - or
+ *       post another WR that completes on the same CQ and wait for this
+ *       WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the first option.
+ */
+static void __ib_drain_srq(struct ib_qp *qp)
+{
+       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+       struct ib_cq *cq;
+       int n, polled = 0;
+       int ret;
+
+       /* Only meaningful for QPs created with an SRQ attached. */
+       if (!qp->srq) {
+               WARN_ONCE(1, "QP 0x%p is not associated with SRQ\n", qp);
+               return;
+       }
+
+       /*
+        * Move the QP to the Error state; per IBTA 10.3.1 this makes the
+        * HW flush outstanding WQEs and eventually generate the affiliated
+        * Last WQE Reached event that we wait for below.
+        */
+       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+       if (ret) {
+               WARN_ONCE(ret, "failed to drain shared recv queue: %d\n", ret);
+               return;
+       }
+
+       /*
+        * Pick the CQ to drain: the SRQ's own CQ when the SRQ type carries
+        * one (e.g. XRC), otherwise the QP's receive CQ.
+        */
+       if (ib_srq_has_cq(qp->srq->srq_type)) {
+               cq = qp->srq->ext.cq;
+       } else if (qp->recv_cq) {
+               cq = qp->recv_cq;
+       } else {
+               WARN_ONCE(1, "QP 0x%p has no CQ associated with SRQ\n", qp);
+               return;
+       }
+
+       /*
+        * Wait (up to 60s) for __ib_qp_event_handler() to signal the
+        * Last WQE Reached event, then drain the CQ by direct polling.
+        * We stop once a poll returns no entries, or after pulling at
+        * most cq->cqe entries (the CQ capacity), per the spec's first
+        * drain option quoted above.  On timeout we simply return
+        * without polling.
+        */
+       if (wait_for_completion_timeout(&qp->srq_completion, 60 * HZ) > 0) {
+               while (polled != cq->cqe) {
+                       n = ib_process_cq_direct(cq, cq->cqe - polled);
+                       if (!n)
+                               return;
+                       polled += n;
+               }
+       }
+}
+
 /**
  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
  *                application.
        ib_drain_sq(qp);
        if (!qp->srq)
                ib_drain_rq(qp);
+       else
+               __ib_drain_srq(qp);
 }
 EXPORT_SYMBOL(ib_drain_qp);