#include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/rpc_rdma.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 
        struct work_struct   sc_work;
 
        struct llist_head    sc_recv_ctxts;
+
+       atomic_t             sc_completion_ids;
 };
 /* sc_flags */
 #define RDMAXPRT_CONN_PENDING  3
        struct list_head        rc_list;
        struct ib_recv_wr       rc_recv_wr;
        struct ib_cqe           rc_cqe;
+       struct rpc_rdma_cid     rc_cid;
        struct ib_sge           rc_recv_sge;
        void                    *rc_recv_buf;
        struct xdr_buf          rc_arg;
 
                                ),                                      \
                                TP_ARGS(wc, cid))
 
-DEFINE_COMPLETION_EVENT(dummy);
-
 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
        TP_PROTO(
                const struct rpcrdma_rep *rep
 
+/*
+ * NOTE(review): svcrdma_post_recv now logs the Receive's rpc_rdma_cid
+ * (the CQ's resource ID plus a per-transport completion counter, see
+ * svc_rdma_recv_cid_init) instead of the raw cqe pointer, and no
+ * longer takes a post status — ib_post_recv() failures are reported
+ * by the separate svcrdma_rq_post_err event instead.
+ */
 TRACE_EVENT(svcrdma_post_recv,
        TP_PROTO(
-               const struct ib_recv_wr *wr,
-               int status
+               const struct svc_rdma_recv_ctxt *ctxt
        ),
 
-       TP_ARGS(wr, status),
+       TP_ARGS(ctxt),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
-               __field(int, status)
+               __field(u32, cq_id)
+               __field(int, completion_id)
        ),
 
        TP_fast_assign(
-               __entry->cqe = wr->wr_cqe;
-               __entry->status = status;
+               __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+               __entry->completion_id = ctxt->rc_cid.ci_completion_id;
        ),
 
-       TP_printk("cqe=%p status=%d",
-               __entry->cqe, __entry->status
+       TP_printk("cq.id=%d cid=%d",
+               __entry->cq_id, __entry->completion_id
        )
 );
 
-TRACE_EVENT(svcrdma_wc_receive,
+DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
+
+/*
+ * NOTE(review): the open-coded svcrdma_wc_receive event becomes an
+ * instance of the cid-based completion event class above, so Receive
+ * completions can be matched to their posted WRs by (cq.id, cid).
+ * svcrdma_rq_post_err is new: it fires only on the ib_post_recv()
+ * error path and records the error code plus the transport's remote
+ * address string (xpt_remotebuf).
+ */
+TRACE_EVENT(svcrdma_rq_post_err,
        TP_PROTO(
-               const struct ib_wc *wc
+               const struct svcxprt_rdma *rdma,
+               int status
        ),
 
-       TP_ARGS(wc),
+       TP_ARGS(rdma, status),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
-               __field(u32, byte_len)
-               __field(unsigned int, status)
-               __field(u32, vendor_err)
+               __field(int, status)
+               __string(addr, rdma->sc_xprt.xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->cqe = wc->wr_cqe;
-               __entry->status = wc->status;
-               if (wc->status) {
-                       __entry->byte_len = 0;
-                       __entry->vendor_err = wc->vendor_err;
-               } else {
-                       __entry->byte_len = wc->byte_len;
-                       __entry->vendor_err = 0;
-               }
+               __entry->status = status;
+               __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
        ),
 
-       TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
-               __entry->cqe, __entry->byte_len,
-               rdma_show_wc_status(__entry->status),
-               __entry->status, __entry->vendor_err
+       TP_printk("addr=%s status=%d",
+               __get_str(addr), __entry->status
        )
 );
 
 
                                        rc_list);
 }
 
+/*
+ * svc_rdma_recv_cid_init - Fill in a Receive completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ *
+ * The queue ID comes from the Receive CQ's rdma-core resource ID
+ * (sc_rq_cq->res.id); the completion ID is drawn from the transport's
+ * sc_completion_ids counter, so each Receive context gets a unique
+ * (cq_id, completion_id) pair for matching posts to completions in
+ * trace output.
+ */
+static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+                                  struct rpc_rdma_cid *cid)
+{
+       cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+       cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static struct svc_rdma_recv_ctxt *
 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;
 
+       svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
+
        ctxt->rc_recv_wr.next = NULL;
        ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
        ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
        int ret;
 
        svc_xprt_get(&rdma->sc_xprt);
+       trace_svcrdma_post_recv(ctxt);
        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
-       trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
        if (ret)
                goto err_post;
        return 0;
 
 err_post:
+       trace_svcrdma_rq_post_err(rdma, ret);
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        svc_xprt_put(&rdma->sc_xprt);
        return ret;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_recv_ctxt *ctxt;
 
-       trace_svcrdma_wc_receive(wc);
-
        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
 
+       trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
        if (wc->status != IB_WC_SUCCESS)
                goto flushed;