/* see post_send() */
                barrier();
                rvt_put_swqe(wqe);
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_hfi1_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        }
        /*
         * If we were waiting for sends to complete before re-sending,
                qp->s_last = s_last;
                /* see post_send() */
                barrier();
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_hfi1_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        } else {
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
 
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-       rvt_qp_swqe_complete(qp, wqe, status);
+       rvt_qp_swqe_complete(qp,
+                            wqe,
+                            ib_hfi1_wc_opcode[wqe->wr.opcode],
+                            status);
 
        if (qp->s_acked == old_last)
                qp->s_acked = last;
 
        return atomic_read(&wss.total_count) >= wss.threshold;
 }
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ *
+ * Indexed directly by the IB_WR_* work-request opcode.  The array is
+ * sized by its highest designated initializer (IB_WR_REG_MR), so
+ * indexing it with any larger IB_WR_* value is out of bounds, and
+ * entries between the listed opcodes are zero-initialized.
+ * NOTE(review): callers must only index this with opcodes hfi1
+ * actually posts — confirm no unsupported opcode can reach the
+ * completion path.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+       [IB_WR_SEND] = IB_WC_SEND,
+       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+       [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+       [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+       [IB_WR_REG_MR] = IB_WC_REG_MR
+};
+
 /*
  * Length of header by opcode, 0 --> not supported
  */
 
                /* see post_send() */
                barrier();
                rvt_put_swqe(wqe);
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_qib_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        }
        /*
         * If we were waiting for sends to complete before resending,
                qp->s_last = s_last;
                /* see post_send() */
                barrier();
-               rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+               rvt_qp_swqe_complete(qp,
+                                    wqe,
+                                    ib_qib_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
        } else
                this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
 
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-       rvt_qp_swqe_complete(qp, wqe, status);
+       rvt_qp_swqe_complete(qp,
+                            wqe,
+                            ib_qib_wc_opcode[wqe->wr.opcode],
+                            status);
 
        if (qp->s_acked == old_last)
                qp->s_acked = last;
 
 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ *
+ * Indexed directly by the IB_WR_* work-request opcode.  This table is
+ * shorter than the hfi1 equivalent: it ends at
+ * IB_WR_ATOMIC_FETCH_AND_ADD, so indexing it with any later opcode
+ * (e.g. IB_WR_SEND_WITH_INV, IB_WR_LOCAL_INV, IB_WR_REG_MR) is an
+ * out-of-bounds read.
+ * NOTE(review): presumably qib never posts those opcodes — confirm
+ * the post_send() validation rejects them before they can reach the
+ * completion path.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+       [IB_WR_SEND] = IB_WC_SEND,
+       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
 /*
  * System image GUID.
  */
 
 };
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
-       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-       [IB_WR_SEND] = IB_WC_SEND,
-       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-       [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-       [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-       [IB_WR_REG_MR] = IB_WC_REG_MR
-};
-EXPORT_SYMBOL(ib_rvt_wc_opcode);
-
 static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map,
                         gfp_t gfp)
 
 static inline void rvt_qp_swqe_complete(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe,
+       enum ib_wc_opcode opcode,
        enum ib_wc_status status)
 {
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
                memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
-               wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+               wc.opcode = opcode;
                wc.qp = &qp->ibqp;
                wc.byte_len = wqe->length;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,