From: Pradeep Gopanapalli
Date: Thu, 10 Nov 2016 22:49:48 +0000 (+0000)
Subject: xsigo: Hardening driver in handling remote QP failures
X-Git-Tag: v4.1.12-92~36^2~2
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=612933b3e7c890b2a29c150f2516bf30ad0698ee;p=users%2Fjedix%2Flinux-maple.git

xsigo: Hardening driver in handling remote QP failures

Orabug: 24929076

Handle scenarios where PSIF generates batched transmit completions.

On a remote QP disconnect, follow this sequence (sketched below):

-> If there are any pending transmit completions, explicitly transition
   the QP state to ERROR.
-> Wait for a maximum of 10 seconds for all pending completions
   (10 seconds derived from retry count * local ACK timeout).
-> Destroy the QP.
-> Wait for another 10 seconds (max) if completions are not returned
   by hardware.
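The ordering above reduces to roughly the following (an illustrative
sketch only, condensed from wait_for_txcmcompletions() and
xve_cm_tx_destroy() in the diff below; locking, ring cleanup and error
paths are elided, and the wrapper name is invented for illustration):

    /* Sketch: RC TX teardown ordering introduced by this patch */
    static void cm_tx_teardown_sketch(struct xve_cm_ctx *p)
    {
            /*
             * Steps 1+2: move the QP to ERROR so the HCA flushes the
             * posted WRs, then poll up to xve_wait_txcompl (default
             * 10) seconds for the pending TX completions.
             */
            wait_for_txcmcompletions(p, 1);

            /* Step 3: destroy the QP ... */
            if (p->qp)
                    ib_destroy_qp(p->qp);

            /*
             * Step 4: ... and give hardware one more bounded wait for
             * any completions that were still in flight.
             */
            wait_for_txcmcompletions(p, 0);
    }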
Synchronized calls to poll_tx: xve_drain_cq() is now serialized with an
XVE_DRAIN_IN_PROGRESS bit, and UD completions are reaped under the TX
lock (see the sketch below).

Added more efficiency in handling of the TX queue full condition.

Handle the scenario where uVNIC removal can come in batches.
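The drain-side synchronization is a test_and_set_bit() guard around the
poll loop; a minimal sketch (simplified from xve_drain_cq() in the diff
below, with RX polling and the log messages elided):

    static void drain_cq_sketch(struct net_device *dev)
    {
            struct xve_dev_priv *priv = netdev_priv(dev);
            unsigned long flags;

            /* Only one drainer at a time; concurrent callers back off */
            if (test_and_set_bit(XVE_DRAIN_IN_PROGRESS, &priv->flags))
                    return;

            /*
             * Reap UD TX completions under the TX/priv locks.
             * poll_tx() now returns non-zero only when the CQ may
             * still be non-empty (n == MAX_SEND_CQE), so loop it.
             */
            netif_tx_lock_bh(dev);
            spin_lock_irqsave(&priv->lock, flags);
            if (priv->tx_outstanding)
                    while (poll_tx(priv))
                            ; /* nothing */
            spin_unlock_irqrestore(&priv->lock, flags);
            netif_tx_unlock_bh(dev);

            clear_bit(XVE_DRAIN_IN_PROGRESS, &priv->flags);
    }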
Reported-by: Pradeep Gopanapalli
Signed-off-by: Pradeep Gopanapalli
Reviewed-by: Aravind Kini
Reviewed-by: viswa krishnamurthy
Reviewed-by: Manish Kumar Singh
Reviewed-by: Ariel cohen
Reviewed-by: UmaShankar Tumari Mahabalagiri
---
diff --git a/drivers/infiniband/ulp/xsigo/xscore/Makefile b/drivers/infiniband/ulp/xsigo/xscore/Makefile
index d6f8c6557dda9..d800cc89ecfeb 100644
--- a/drivers/infiniband/ulp/xsigo/xscore/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xscore/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XSCORE) := xscore.o
 xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
	    xscore_stats.o xscore_uadm.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8035\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8037\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xsvhba/Makefile b/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
index 6d9c332c0bb3c..9baf33ffa148f 100644
--- a/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
@@ -3,7 +3,7 @@
 xsvhba-y := vhba_main.o vhba_xsmp.o vhba_create.o vhba_init.o vhba_delete.o \
	    vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o \
	    vhba_scsi_intf.o vhba_align.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8035\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8037\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xsvnic/Makefile b/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
index d496e85166026..abd8202f8f8db 100644
--- a/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
 xsvnic-y := xsvnic_main.o xsvnic_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8035\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8037\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xve/Makefile b/drivers/infiniband/ulp/xsigo/xve/Makefile
index 48fb3f796f71f..58f03f2aad8de 100644
--- a/drivers/infiniband/ulp/xsigo/xve/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xve/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XVE) := xve.o
 xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
	 xve_ethtool.o xve_cm.o xve_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8035\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8037\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve.h b/drivers/infiniband/ulp/xsigo/xve/xve.h
index d656599f5ad3e..f0edb3b2b14c3 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve.h
+++ b/drivers/infiniband/ulp/xsigo/xve/xve.h
@@ -195,6 +195,7 @@ enum {
	XVE_FLAG_IB_EVENT = 14,
	XVE_FLAG_DONT_DETACH_MCAST = 15,
	XVE_MAX_BACKOFF_SECONDS = 16,
+	XVE_DRAIN_IN_PROGRESS = 17,
 };
 
 enum xve_advert_types {
@@ -295,6 +296,8 @@ enum {
	XVE_TX_RC_COUNTER,
	XVE_RC_RXCOMPL_COUNTER,
	XVE_RC_TXCOMPL_COUNTER,
+	XVE_RC_RXCOMPL_ERR_COUNTER,
+	XVE_RC_TXCOMPL_ERR_COUNTER,
	XVE_TX_MCAST_PKT,
	XVE_TX_BCAST_PKT,
	XVE_TX_MCAST_ARP_QUERY,
@@ -506,6 +509,7 @@ extern u32 xve_counters[];
 extern struct workqueue_struct *xve_taskqueue;
 extern struct workqueue_struct *xve_workqueue;
 extern int xve_mc_sendonly_timeout;
+extern int xve_wait_txcompl;
 
 extern void xve_remove_procfs_root_entries(void);
 extern int xve_create_procfs_root_entries(void);
@@ -571,6 +575,7 @@ struct xve_cm_stats {
	u32 rx_rate;
	u32 tx_bytes;
	u32 rx_bytes;
+	u32 tx_compl_err;
 };
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_cm.c b/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
index 4b4f896a409d0..f21b5ca02cf4d 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
@@ -41,6 +41,12 @@ static int rnr_retry_count = 4;
 module_param_named(rnr_retry_count, rnr_retry_count, int, 0644);
 MODULE_PARM_DESC(rnr_retry_count, "Max number rnr retries");
 
+int xve_wait_txcompl = 10;
+module_param_named(xve_wait_txcompl, xve_wait_txcompl, int, 0644);
+
+static int xve_modify_qp = 1;
+module_param_named(xve_modify_qp, xve_modify_qp, int, 0644);
+
 #define XVE_CM_IETF_ID 0x1000000000000000ULL
 
 #define XVE_CM_RX_UPDATE_TIME (256 * HZ)
@@ -548,6 +554,7 @@ void xve_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
			   wc->qp->qp_num, wc->status, wr_id,
			   wc->vendor_err);
		INC_RX_DROP_STATS(priv, dev);
+		priv->counters[XVE_RC_RXCOMPL_ERR_COUNTER]++;
		goto repost;
	}
 
@@ -647,16 +654,14 @@ static inline int post_send(struct xve_dev_priv *priv,
 }
 
 static void xve_cm_tx_buf_free(struct xve_dev_priv *priv,
-			       struct xve_cm_buf *tx_req)
+			       struct xve_cm_buf *tx_req,
+			       struct xve_cm_ctx *tx,
+			       uint32_t wr_id, uint32_t qp_num)
 {
-	if ((tx_req->skb == NULL) || (tx_req->mapping[0] == 0))
-		xve_debug(DEBUG_DATA_INFO, priv,
-			  "%s Contents of tx_req %p are NULL skb %p mapping %lld\n",
-			  __func__, tx_req, tx_req->skb, tx_req->mapping[0]);
-	else
-		ib_dma_unmap_single(priv->ca, tx_req->mapping[0],
-				    tx_req->skb->len, DMA_TO_DEVICE);
+	BUG_ON(tx_req == NULL || tx_req->skb == NULL);
+	ib_dma_unmap_single(priv->ca, tx_req->mapping[0],
+			    tx_req->skb->len, DMA_TO_DEVICE);
 
	xve_dev_kfree_skb_any(priv, tx_req->skb, 1);
	memset(tx_req, 0, sizeof(struct xve_cm_buf));
 }
@@ -708,7 +713,7 @@ int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
		xve_warn(priv, "QP[%d] post_send failed wr_id:%d ctx:%p",
			 tx->qp->qp_num, wr_id, tx);
		INC_TX_ERROR_STATS(priv, dev);
-		xve_cm_tx_buf_free(priv, tx_req);
+		xve_cm_tx_buf_free(priv, tx_req, tx, 0, tx->qp->qp_num);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;
@@ -746,7 +751,7 @@ void xve_cm_handle_tx_wc(struct net_device *dev,
	}
 
	tx_req = &tx->tx_ring[wr_id];
-	xve_cm_tx_buf_free(priv, tx_req);
+	xve_cm_tx_buf_free(priv, tx_req, tx, wr_id, wc->qp->qp_num);
 
	netif_tx_lock(dev);
	++tx->tx_tail;
@@ -759,6 +764,8 @@
	}
 
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) {
+		priv->counters[XVE_RC_TXCOMPL_ERR_COUNTER]++;
+		tx->stats.tx_compl_err++;
		if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
			xve_warn(priv, "QP[%x] failed cm send event status:%d wrid:%d vend_err:%x",
				 wc->qp->qp_num, wc->status, wr_id,
@@ -1081,11 +1088,56 @@ err_tx:
	return ret;
 }
 
+static int wait_for_txcmcompletions(struct xve_cm_ctx *p, u8 modify)
+{
+	struct xve_dev_priv *priv = netdev_priv(p->netdev);
+	unsigned long begin;
+	uint32_t qpnum = p->qp ? p->qp->qp_num : 0;
+
+
+	if (p->tx_ring) {
+		int num_loops = 0;
+
+		begin = jiffies;
+
+		while ((int)p->tx_tail - (int)p->tx_head < 0) {
+			if (!num_loops && xve_modify_qp && modify) {
+				ib_modify_qp(p->qp, &xve_cm_err_attr,
+					     IB_QP_STATE);
+				xve_debug(DEBUG_CM_INFO, priv,
+					  "M%d QP[%x] TX completions pending[%d]",
+					  modify, qpnum,
+					  p->tx_head - p->tx_tail);
+			}
+
+			/* If Oper State is down poll for completions */
+			if (!test_bit(XVE_OPER_UP, &priv->state))
+				xve_drain_cq(priv->netdev);
+
+			if (time_after(jiffies,
+				       begin + xve_wait_txcompl * HZ)) {
+				xve_warn(priv,
+					 "M%d QP[%x] Tx Completions Pending[%d], Waited[%d:%d] state%d",
+					 modify, qpnum,
+					 p->tx_head - p->tx_tail,
+					 num_loops, xve_wait_txcompl,
+					 test_bit(XVE_OPER_UP, &priv->state));
+				return -EINVAL;
+			}
+			num_loops++;
+			msleep(20);
+		}
+		if (num_loops != 0)
+			xve_debug(DEBUG_CM_INFO, priv,
+				  "M%d QP%x Overall Wait[%d:%d]",
+				  modify, qpnum, num_loops,
+				  jiffies_to_msecs(jiffies - begin));
+	}
+
+	return 0;
+}
+
 static void xve_cm_tx_destroy(struct xve_cm_ctx *p)
 {
	struct xve_dev_priv *priv = netdev_priv(p->netdev);
	struct xve_cm_buf *tx_req;
-	unsigned long begin;
	unsigned long flags = 0;
	uint32_t qp_num = p->qp ? p->qp->qp_num : 0;
@@ -1096,36 +1148,27 @@ static void xve_cm_tx_destroy(struct xve_cm_ctx *p)
	if (p->id)
		ib_destroy_cm_id(p->id);
 
-	if (p->tx_ring) {
-		/* Wait for all sends to complete */
-		if (!netif_carrier_ok(priv->netdev)
-		    && unlikely(priv->tx_outstanding > MAX_SEND_CQE))
-			while (poll_tx(priv))
-				; /* nothing */
+	wait_for_txcmcompletions(p, 1);
 
-		begin = jiffies;
-		while ((int)p->tx_tail - (int)p->tx_head < 0) {
-			if (time_after(jiffies, begin + 5 * HZ)) {
-				xve_warn(priv,
-					 "timing out; %d sends not completed\n",
-					 p->tx_head - p->tx_tail);
-				goto timeout;
-			}
+	/* Destroy QP and Wait for any pending completions */
+	if (p->qp)
+		ib_destroy_qp(p->qp);
 
-			msleep(20);
-		}
-	}
+	pr_info("%s QP[%x] ctx:%p Destroyed head[0x%x] tail[0x%x]\n",
+		priv->xve_name, qp_num, p, p->tx_head, p->tx_tail);
 
-timeout:
+	wait_for_txcmcompletions(p, 0);
 
	spin_lock_irqsave(&priv->lock, flags);
	while ((int)p->tx_tail - (int)p->tx_head < 0) {
-		tx_req = &p->tx_ring[p->tx_tail & (priv->xve_sendq_size - 1)];
+		uint32_t wr_id = p->tx_tail & (priv->xve_sendq_size - 1);
+
+		tx_req = &p->tx_ring[wr_id];
		++p->tx_tail;
		spin_unlock_irqrestore(&priv->lock, flags);
-		xve_cm_tx_buf_free(priv, tx_req);
+		xve_cm_tx_buf_free(priv, tx_req, p, 0, 0);
		netif_tx_lock_bh(p->netdev);
		if (unlikely(--priv->tx_outstanding == (priv->xve_sendq_size >> 1))
@@ -1140,10 +1183,6 @@ timeout:
	}
	spin_unlock_irqrestore(&priv->lock, flags);
 
-	xve_warn(priv, "QP[%x] Destroyed, head[0x%x] tail[0x%x]",
-		 qp_num, p->tx_head, p->tx_tail);
-	if (p->qp)
-		ib_destroy_qp(p->qp);
	if (p->tx_ring)
		vfree(p->tx_ring);
	if (p != NULL)
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_ib.c b/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
index 4954c84a45360..4cae01e27910f 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
@@ -522,12 +522,8 @@ static void xve_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
	}
 
	tx_req = &priv->tx_ring[wr_id];
-	if ((tx_req == NULL) || (tx_req->ah == NULL)) {
-		xve_debug(DEBUG_DATA_INFO, priv,
-			  "%s [ca %p] wr_id%d content NULL\n",
-			  __func__, priv->ca, wr_id);
-		return;
-	}
+
+	BUG_ON(tx_req == NULL || tx_req->ah == NULL);
 
	xve_put_ah_refcnt(tx_req->ah);
	xve_free_txbuf_memory(priv, tx_req);
@@ -550,22 +546,19 @@
 int poll_tx(struct xve_dev_priv *priv)
 {
-	int n, i, tot = 0;
+	int n, i;
 
-	do {
-		n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
-		/* handle multiple WC's in one call */
-		for (i = 0; i < n; ++i)
-			xve_ib_handle_tx_wc(priv->netdev,
-					    priv->send_wc + i);
-		if (n < 0) {
-			xve_warn(priv, "%s ib_poll_cq() failed, rc %d\n",
-				 __func__, n);
-		}
-		tot += n;
-	} while (n == MAX_SEND_CQE);
+	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+	/* handle multiple WC's in one call */
+	for (i = 0; i < n; ++i)
+		xve_ib_handle_tx_wc(priv->netdev,
+				    priv->send_wc + i);
+	if (n < 0) {
+		xve_warn(priv, "%s ib_poll_cq() failed, rc %d\n",
+			 __func__, n);
+	}
 
-	return tot;
+	return n == MAX_SEND_CQE;
 }
 
 static int poll_rx(struct xve_dev_priv *priv, int num_polls, int *done,
@@ -712,7 +705,8 @@ static void xve_ib_tx_timer_func(unsigned long ctx)
	spin_lock_irqsave(&priv->lock, flags);
	if (test_bit(XVE_OPER_UP, &priv->state) &&
	    !test_bit(XVE_DELETING, &priv->state)) {
-		poll_tx(priv);
+		while (poll_tx(priv))
+			; /* nothing */
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	if (netif_queue_stopped(dev))
@@ -800,6 +794,18 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
	int id;
 
	id = priv->tx_head & (priv->xve_sendq_size - 1);
+
+	/*
+	 * Gateway broadcast packet's can come as that packets are not
+	 * controlled by network layer
+	 */
+	if (priv->tx_outstanding >= priv->xve_sendq_size) {
+		xve_warn(priv, "TX QUEUE FULL %d head%d tail%d out%d type%d",
+			 id, priv->tx_head, priv->tx_tail,
+			 priv->tx_outstanding, type);
+		goto drop_pkt;
+	}
+
	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
@@ -895,10 +901,6 @@
	}
 
	priv->send_hbeat_flag = 0;
-	if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK)) {
-		priv->counters[XVE_TX_WMARK_REACH_COUNTER]++;
-		poll_tx(priv);
-	}
 
	if (packet_sent)
		priv->counters[XVE_TX_COUNTER]++;
@@ -1043,7 +1045,14 @@ void xve_drain_cq(struct net_device *dev)
 {
	struct xve_dev_priv *priv = netdev_priv(dev);
	int n, done = 0;
+	unsigned long flags = 0;
 
+	if (test_and_set_bit(XVE_DRAIN_IN_PROGRESS, &priv->flags)) {
+		xve_info(priv, "Drain in progress[%d:%d:%d] state[%lx:%lx]",
+			 priv->tx_outstanding, priv->tx_head,
+			 priv->tx_tail, priv->flags, priv->state);
+		return;
+	}
	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
@@ -1055,8 +1064,20 @@
		n = poll_rx(priv, XVE_NUM_WC, &done, 1);
	} while (n == XVE_NUM_WC);
 
-	poll_tx(priv);
	local_bh_enable();
+
+	/* Poll UD completions */
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (priv->tx_outstanding)
+		while (poll_tx(priv))
+			; /* nothing */
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
+
+	clear_bit(XVE_DRAIN_IN_PROGRESS, &priv->flags);
 }
 
 int xve_ib_dev_open(struct net_device *dev)
@@ -1141,7 +1162,8 @@ int xve_ib_dev_stop(struct net_device *dev, int flush)
	begin = jiffies;
 
	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
-		if (time_after(jiffies, begin + 5 * HZ)) {
+		/* Wait for xve_wait_txcompl seconds */
+		if (time_after(jiffies, begin + xve_wait_txcompl * HZ)) {
			xve_warn(priv,
				 "%s timing out; %d sends %d receives not completed\n",
				 __func__, priv->tx_head - priv->tx_tail,
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_main.c b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
index 4f58c980c4d6d..a5a6e4b0019e1 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_main.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
@@ -647,12 +647,19 @@ static void path_rec_completion(int status,
		skb->dev = dev;
		xve_get_ah_refcnt(path->ah);
		priv->counters[XVE_PATHREC_GW_COUNTER]++;
-		/* Sending the queued GATEWAY Packet */
+		/* Send G/W packet */
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
+
		ret = xve_send(dev, skb, path->ah, priv->gw.t_data_qp, 2);
		if (ret == NETDEV_TX_BUSY) {
			xve_warn(priv, "send queue full full, dropping packet for %s\n",
				 priv->xve_name);
-		}
+		}
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
	}
 }
@@ -822,8 +829,10 @@ int xve_gw_send(struct net_device *dev, struct sk_buff *skb)
	int ret = NETDEV_TX_OK;
 
	path = xve_get_gw_path(dev);
-	if (!path)
+	if (!path) {
+		dev_kfree_skb_any(skb);
		return NETDEV_TX_BUSY;
+	}
 
	if (path->ah) {
		xve_dbg_data(priv, "Sending unicast copy to gw ah:%p dqpn:%u\n",
@@ -1047,6 +1056,13 @@ unlock:
		dev_kfree_skb_any(skb);
 
	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK)) {
+		priv->counters[XVE_TX_WMARK_REACH_COUNTER]++;
+		mod_timer(&priv->poll_timer, jiffies);
+
+	}
+
	return ret;
 }
@@ -1206,9 +1222,11 @@ void handle_carrier_state(struct xve_dev_priv *priv, char state)
 {
	if (state) {
		priv->jiffies = jiffies;
-		priv->counters[XVE_TX_WAKE_UP_COUNTER]++;
		netif_carrier_on(priv->netdev);
-		netif_wake_queue(priv->netdev);
+		if (netif_queue_stopped(priv->netdev)) {
+			netif_wake_queue(priv->netdev);
+			priv->counters[XVE_TX_WAKE_UP_COUNTER]++;
+		}
		/* careful we are holding lock (priv->lock)inside this */
		xve_data_recv_handler(priv);
	} else {
@@ -2638,8 +2656,11 @@ static void xve_handle_del_message(xsmp_cookie_t xsmp_hndl,
			__func__, xmsgp->xve_name);
		return;
	}
+	xve_info(priv, "Start Deleting interface");
	spin_lock_irqsave(&priv->lock, flags);
	set_bit(XVE_DELETING, &priv->state);
+	/*Set OperState to down*/
+	clear_bit(XVE_OPER_UP, &priv->state);
	spin_unlock_irqrestore(&priv->lock, flags);
 }
@@ -2785,6 +2806,8 @@ static void xve_xsmp_event_handler(xsmp_cookie_t xsmp_hndl, int event)
		if (xsmp_sessions_match(&priv->xsmp_info, xsmp_hndl)) {
			spin_lock_irqsave(&priv->lock, flags);
			set_bit(XVE_DELETING, &priv->state);
+			/*Set OperState to down*/
+			clear_bit(XVE_OPER_UP, &priv->state);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}
@@ -2924,6 +2947,8 @@ static void __exit xve_cleanup_module(void)
	list_for_each_entry(priv, &xve_dev_list, list) {
		spin_lock_irqsave(&priv->lock, flags);
		set_bit(XVE_DELETING, &priv->state);
+		/*Set OperState to down*/
+		clear_bit(XVE_OPER_UP, &priv->state);
		set_bit(XVE_SHUTDOWN, &priv->state);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
index ff67fd07f2c94..b7497474e2108 100755
--- a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
@@ -127,8 +127,10 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
	"mcast detach count:\t\t",
	"tx ud count:\t\t\t",
	"tx rc count:\t\t\t",
-	"rc tx compl count:\t\t\t",
	"rc rx compl count:\t\t\t",
+	"rc tx compl count:\t\t\t",
+	"rc rx compl error count:\t\t",
+	"rc tx compl error count:\t\t",
	"tx mcast count:\t\t\t",
	"tx broadcast count:\t\t\t",
	"tx arp count:\t\t\t",