From: Pradeep Gopanapalli
Date: Tue, 1 Nov 2016 19:41:48 +0000 (+0000)
Subject: xsigo: xve driver has excessive messages
X-Git-Tag: v4.1.12-92~45^2~1
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=6e40fa866cb97cbda0c7ca6726d8e9ef5792de2e;p=users%2Fjedix%2Flinux-maple.git

xsigo: xve driver has excessive messages

Orabug: 24758335

Moved some message types from warning to debug severity.
Consolidated multiple messages into single ones to avoid flooding
the console. Added more counters to identify the state of a vnic.
Added a new debug type, xve_info.

Reported-by: chien yen
Signed-off-by: Pradeep Gopanapalli
Reviewed-by: Aravind Kini
Reviewed-by: UmaShankar Tumari Mahabalagiri
---

diff --git a/drivers/infiniband/ulp/xsigo/xscore/Makefile b/drivers/infiniband/ulp/xsigo/xscore/Makefile
index 4d6ee34ba069b..6b2c8dcaa477a 100644
--- a/drivers/infiniband/ulp/xsigo/xscore/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xscore/Makefile
@@ -2,7 +2,7 @@
 obj-$(CONFIG_INFINIBAND_XSCORE) := xscore.o
 xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
		xscore_stats.o xscore_uadm.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xscore/xsmp.c b/drivers/infiniband/ulp/xsigo/xscore/xsmp.c
index c7645a71e87d3..cdedaba2a3a58 100644
--- a/drivers/infiniband/ulp/xsigo/xscore/xsmp.c
+++ b/drivers/infiniband/ulp/xsigo/xscore/xsmp.c
@@ -937,7 +937,7 @@ static void xsmp_process_recv_msgs(struct work_struct *work)
		break;
	default:
		kfree(xwork->msg);
-		XSMP_ERROR("%s: Unknown message type: %d\n", __func__,
+		XSMP_INFO("%s: Unknown message type: %d\n", __func__,
			   m_header->type);
		break;
	}
diff --git a/drivers/infiniband/ulp/xsigo/xsvhba/Makefile b/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
index 5d562d1ac5ef5..55b873fc2a65d 100644
--- a/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
@@ -3,7 +3,7 @@
 xsvhba-y := vhba_main.o vhba_xsmp.o vhba_create.o vhba_init.o vhba_delete.o \
	vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o \
	vhba_scsi_intf.o vhba_align.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xsvnic/Makefile b/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
index 7d856ea2d029b..a3ab04a5ab7a4 100644
--- a/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
 xsvnic-y := xsvnic_main.o xsvnic_stats.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xve/Makefile b/drivers/infiniband/ulp/xsigo/xve/Makefile
index cfcef1d931198..a422c13497022 100644
--- a/drivers/infiniband/ulp/xsigo/xve/Makefile
+++ b/drivers/infiniband/ulp/xsigo/xve/Makefile
@@ -2,7 +2,7 @@
 obj-$(CONFIG_INFINIBAND_XVE) := xve.o
 xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
	xve_ethtool.o xve_cm.o xve_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve.h b/drivers/infiniband/ulp/xsigo/xve/xve.h
index 038be751dd031..d656599f5ad3e 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve.h
+++ b/drivers/infiniband/ulp/xsigo/xve/xve.h
@@ -227,6 +227,7 @@ enum {
	XVE_STATE_MACHINE_DOWN,
	XVE_STATE_MACHINE_IBCLEAR,
	XVE_NAPI_POLL_COUNTER,
+	XVE_NAPI_DROP_COUNTER,
	XVE_SHORT_PKT_COUNTER,
	XVE_TX_COUNTER,
	XVE_TX_SKB_FREE_COUNTER,
@@ -311,6 +312,7 @@ enum {
	XVE_PATHREC_QUERY_COUNTER,
	XVE_PATHREC_RESP_COUNTER,
	XVE_PATHREC_RESP_ERR_COUNTER,
+	XVE_PATHREC_GW_COUNTER,
	XVE_SM_CHANGE_COUNTER,
	XVE_CLIENT_REREGISTER_COUNTER,
 
@@ -431,7 +433,9 @@ enum {
	DEBUG_QP_INFO = 0x00040000,
	DEBUG_TX_INFO = 0x00080000,
	DEBUG_RX_INFO = 0x00100000,
-	DEBUG_TXDATA_INFO = 0x00200000
+	DEBUG_TXDATA_INFO = 0x00200000,
+	DEBUG_INSTALL_INFO = 0x00400000,
+	DEBUG_FWTABLE_INFO = 0x00800000
 };
 
 #define XVE_OP_RECV (1ul << 31)
@@ -910,11 +914,17 @@ struct icmp6_ndp {
 #define DRV_PRINT(fmt, arg...) \
	PRINT(KERN_INFO, "DRV", fmt, ##arg)
 #define xve_printk(level, priv, format, arg...) \
-	printk(level "%s: " format, \
+	printk(level "%s: " format "\n", \
	       ((struct xve_dev_priv *) priv)->netdev->name, \
	       ## arg)
 #define xve_warn(priv, format, arg...) \
	xve_printk(KERN_WARNING, priv, format, ## arg)
+#define xve_info(priv, format, arg...) \
+	do { \
+		if (xve_debug_level & DEBUG_DRV_INFO) \
+			xve_printk(KERN_INFO, priv, format, \
+				   ## arg); \
+	} while (0)
 
 #define XSMP_INFO(fmt, arg...) \
	do { \
@@ -949,11 +959,11 @@ struct icmp6_ndp {
	do { \
		if (xve_debug_level & level) { \
			if (priv) \
-				printk("%s: " format, \
+				pr_info("%s: " format "\n", \
				((struct xve_dev_priv *) priv)->netdev->name, \
				## arg); \
			else \
-				printk("XVE: " format, ## arg); \
+				pr_info("XVE: " format "\n", ## arg); \
		} \
	} while (0)
@@ -1528,7 +1538,7 @@ static inline void dbg_dump_raw_pkt(unsigned char *buff, int length, char *name)
	if (!(xve_debug_level & DEBUG_TEST_INFO))
		return;
 
-	printk("%s. Packet length is %d\n", name, length);
+	pr_info("%s. Packet length is %d\n", name, length);
	tmp_len = (length >> 2) + 1;
	data_ptr = (u32 *) buff;
	for (i = 0; i < tmp_len; i++) {
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_cm.c b/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
index 78c83ea9ef79c..4b4f896a409d0 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
@@ -59,7 +59,8 @@ static struct ib_send_wr xve_cm_rx_drain_wr = {
	.opcode = IB_WR_SEND,
 };
 
-static int xve_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int xve_cm_tx_handler(struct ib_cm_id *cm_id,
+			     struct ib_cm_event *event);
 static void __xve_cm_tx_reap(struct xve_dev_priv *priv);
 
 static void xve_cm_dma_unmap_rx(struct xve_dev_priv *priv, int frags,
@@ -91,7 +92,7 @@ static int xve_cm_post_receive_srq(struct net_device *netdev, int id)
 
	ret = ib_post_srq_recv(priv->cm.srq, wr, &bad_wr);
	if (unlikely(ret)) {
-		xve_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
+		xve_warn(priv, "post srq failed for buf %d (%d)", id, ret);
		xve_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				    priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
@@ -112,7 +113,7 @@ static struct sk_buff *xve_cm_alloc_rx_skb(struct net_device *dev,
 
	skb = xve_dev_alloc_skb(priv, XVE_CM_HEAD_SIZE + NET_IP_ALIGN);
	if (unlikely(!skb)) {
-		xve_warn(priv, "%s Failed to allocate skb\n", __func__);
+		xve_warn(priv, "%s Failed to allocate skb", __func__);
		return NULL;
	}
 
@@ -143,7 +144,7 @@ static struct sk_buff *xve_cm_alloc_rx_skb(struct net_device *dev,
		    ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page.p,
				    0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) {
-			xve_warn(priv, "%s Failed to Map page\n", __func__);
+			xve_warn(priv, "%s Failed to Map page", __func__);
			goto partial_error;
		}
	}
@@ -199,7 +200,7 @@ static void xve_cm_start_rx_drain(struct xve_dev_priv *priv)
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &xve_cm_rx_drain_wr, &bad_wr))
-		xve_warn(priv, "failed to post drain wr\n");
+		xve_warn(priv, "failed to post drain wr");
 
	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
 }
@@ -250,24 +251,24 @@ static int xve_cm_modify_rx_qp(struct net_device *dev,
	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
+		xve_warn(priv, "failed to init QP attr for INIT: %d", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to modify QP to INIT: %d\n", ret);
+		xve_warn(priv, "failed to modify QP to INIT: %d", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+		xve_warn(priv, "failed to init QP attr for RTR: %d", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+		xve_warn(priv, "failed to modify QP to RTR: %d", ret);
		return ret;
	}
 
@@ -282,12 +283,12 @@ static int xve_cm_modify_rx_qp(struct net_device *dev,
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+		xve_warn(priv, "failed to init QP attr for RTS: %d", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+		xve_warn(priv, "failed to modify QP to RTS: %d", ret);
		return 0;
	}
 
@@ -333,7 +334,8 @@ static int xve_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
	return ib_send_cm_rep(cm_id, &rep);
 }
 
-static int xve_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int xve_cm_req_handler(struct ib_cm_id *cm_id,
+			      struct ib_cm_event *event)
 {
	struct net_device *dev = cm_id->context;
	struct xve_dev_priv *priv = netdev_priv(dev);
@@ -378,8 +380,8 @@ static int xve_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
		char print[512];
 
		print_mgid_buf(print, (char *)dgid->raw);
-		pr_info("XVE: %s Adding Rx QP to the path %s\n",
-			priv->xve_name, print);
+		pr_info("XVE: %s Adding Rx QP%x to the path %s ctx:%p\n",
+			priv->xve_name, p->qp->qp_num, print, p);
		path->cm_ctx_rx = p;
	} else {
		priv->counters[XVE_PATH_NOT_SETUP]++;
@@ -395,9 +397,9 @@ static int xve_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 
	ret = xve_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
-		xve_warn(priv, "failed to send REP: %d\n", ret);
+		xve_warn(priv, "failed to send REP: %d", ret);
		if (ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE))
-			xve_warn(priv, "unable to move qp to error state\n");
+			xve_warn(priv, "unable to move qp to error state");
	}
	return 0;
 
@@ -408,7 +410,8 @@ err_qp:
	return ret;
 }
 
-static int xve_cm_rx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int xve_cm_rx_handler(struct ib_cm_id *cm_id,
+			     struct ib_cm_event *event)
 {
	struct xve_cm_ctx *p;
	struct xve_dev_priv *priv;
@@ -424,7 +427,7 @@ static int xve_cm_rx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
		p = cm_id->context;
		priv = netdev_priv(p->netdev);
		if (ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE))
-			xve_warn(priv, "unable to move qp to error state\n");
+			xve_warn(priv, "unable to move qp to error state");
		/* Fall through */
	default:
		return 0;
@@ -450,8 +453,9 @@ static void xve_cm_free_rx_reap_list(struct net_device *dev)
 
 /* Adjust length of skb with fragments to match received data */
-static inline void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-				 unsigned int length, struct sk_buff *toskb)
+static inline void skb_put_frags(struct sk_buff *skb,
+				 unsigned int hdr_space,
+				 unsigned int length, struct sk_buff *toskb)
 {
	int i, num_frags;
	unsigned int size;
@@ -499,7 +503,7 @@ void xve_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
	struct sk_buff *small_skb;
	u16 vlan;
 
-	xve_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
+	xve_dbg_data(priv, "cm recv completion: id %d, status: %d",
		     wr_id, wc->status);
 
	if (unlikely(wr_id >= priv->xve_recvq_size)) {
@@ -513,7 +517,7 @@ void xve_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			xve_warn(priv,
-				 "cm recv completion event with wrid %d (> %d)\n",
+				 "cm recv completion event with wrid %d (> %d)",
				 wr_id, priv->xve_recvq_size);
		return;
	}
@@ -538,11 +542,11 @@ void xve_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
	skb = rx_ring[wr_id].skb;
 
	if (unlikely(wc->status != IB_WC_SUCCESS)) {
-		if (!test_bit(XVE_DELETING, &priv->state)) {
-			pr_err("%s: cm recv error", priv->xve_name);
-			pr_err("(status=%d, wrid=%d", wc->status, wr_id);
-			pr_err("vend_err %x)\n", wc->vendor_err);
-		}
+		if (!test_bit(XVE_DELETING, &priv->state))
+			xve_dbg_data(priv,
+				     "cm recv err QP%x status:%d wr:%d vendor_err%x",
+				     wc->qp->qp_num, wc->status, wr_id,
+				     wc->vendor_err);
		INC_RX_DROP_STATS(priv, dev);
		goto repost;
	}
@@ -623,10 +627,8 @@ copied:
	priv->counters[XVE_RC_RXCOMPL_COUNTER]++;
	xve_send_skb(priv, skb);
 repost:
-	if (unlikely(xve_cm_post_receive_srq(dev, wr_id))) {
-		xve_warn(priv, "xve_cm_post_receive_srq failed ");
-		xve_warn(priv, "for buf %d\n", wr_id);
-	}
+	if (unlikely(xve_cm_post_receive_srq(dev, wr_id)))
+		xve_warn(priv, "cm post srq failed for buf %d", wr_id);
 }
 
 static inline int post_send(struct xve_dev_priv *priv,
@@ -666,10 +668,11 @@ int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
	struct xve_cm_buf *tx_req;
	u64 addr;
	int ret = NETDEV_TX_OK;
+	uint32_t wr_id;
 
	if (unlikely(skb->len > tx->mtu + VLAN_ETH_HLEN)) {
		xve_warn(priv,
-			 "packet len %d (> %d) too long to send, dropping\n",
+			 "packet len %d (> %d) too long to send, dropping",
			 skb->len, tx->mtu);
		INC_TX_DROP_STATS(priv, dev);
		INC_TX_ERROR_STATS(priv, dev);
@@ -678,7 +681,7 @@ int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
	}
 
	xve_dbg_data(priv,
-		     "sending packet: head 0x%x length %d connection 0x%x\n",
+		     "sending packet: head 0x%x length %d connection 0x%x",
		     tx->tx_head, skb->len, tx->qp->qp_num);
 
	/*
@@ -688,7 +691,8 @@ int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
-	tx_req = &tx->tx_ring[tx->tx_head & (priv->xve_sendq_size - 1)];
+	wr_id = tx->tx_head & (priv->xve_sendq_size - 1);
+	tx_req = &tx->tx_ring[wr_id];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -699,10 +703,10 @@ int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
	}
	tx_req->mapping[0] = addr;
 
-	if (unlikely(post_send(priv, tx, tx->tx_head &
-			       (priv->xve_sendq_size - 1),
+	if (unlikely(post_send(priv, tx, wr_id,
			       addr, skb->len))) {
-		xve_warn(priv, "post_send failed\n");
+		xve_warn(priv, "QP[%d] post_send failed wr_id:%d ctx:%p",
+			 tx->qp->qp_num, wr_id, tx);
		INC_TX_ERROR_STATS(priv, dev);
		xve_cm_tx_buf_free(priv, tx_req);
	} else {
@@ -714,7 +718,7 @@ int xve_cm_send(struct net_device *dev, struct sk_buff *skb,
			tx->qp->qp_num);
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			xve_warn(priv,
-				 "request notify on send CQ failed\n");
+				 "request notify on send CQ failed");
		priv->counters[XVE_TX_RING_FULL_COUNTER]++;
		priv->counters[XVE_TX_QUEUE_STOP_COUNTER]++;
		netif_stop_queue(dev);
@@ -736,7 +740,7 @@ void xve_cm_handle_tx_wc(struct net_device *dev,
		     wr_id, wc->status);
 
	if (unlikely(wr_id >= priv->xve_sendq_size)) {
-		xve_warn(priv, "cm send completion event with wrid %d (> %d)\n",
+		xve_warn(priv, "cm send completion event with wrid %d (> %d)",
			 wr_id, priv->xve_sendq_size);
		return;
	}
@@ -755,9 +759,14 @@ void xve_cm_handle_tx_wc(struct net_device *dev,
	}
 
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) {
-		pr_err("%s: failed cm send event ", priv->xve_name);
-		pr_err("(status=%d, wrid=%d vend_err %x)\n",
-		       wc->status, wr_id, wc->vendor_err);
+		if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
+			xve_warn(priv, "QP[%x] failed cm send event status:%d wrid:%d vend_err:%x",
+				 wc->qp->qp_num, wc->status, wr_id,
+				 wc->vendor_err);
+		else
+			xve_debug(DEBUG_CM_INFO, priv, "QP[%x] status:%d wrid:%d vend_err:%x",
+				  wc->qp->qp_num, wc->status, wr_id,
+				  wc->vendor_err);
		xve_cm_destroy_tx_deferred(tx);
	}
	netif_tx_unlock(dev);
@@ -819,8 +828,8 @@ void xve_cm_dev_stop(struct net_device *dev)
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE);
		if (ret)
-			xve_warn(priv, "unable to move qp to error state: %d\n",
-				 ret);
+			xve_warn(priv, "QP[%x] unable to move error state[%d]",
+				 p->qp ? p->qp->qp_num : 0, ret);
		spin_lock_irq(&priv->lock);
	}
 
@@ -831,7 +840,7 @@ void xve_cm_dev_stop(struct net_device *dev)
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
-			xve_warn(priv, "RX drain timing out\n");
+			xve_warn(priv, "RX drain timing out");
 
			/*
			 * assume the HW is wedged and just free up everything.
@@ -871,7 +880,7 @@ static int xve_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 
	p->mtu = be32_to_cpu(data->mtu);
	if (p->mtu <= ETH_HLEN) {
-		xve_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+		xve_warn(priv, "Rejecting connection: mtu %d <= %d",
			 p->mtu, ETH_HLEN);
		return -EINVAL;
	}
@@ -879,26 +888,26 @@ static int xve_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+		xve_warn(priv, "failed to init QP attr for RTR: %d", ret);
		return ret;
	}
 
	qp_attr.rq_psn = 0;	/* FIXME */
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+		xve_warn(priv, "failed to modify QP to RTR: %d", ret);
		return ret;
	}
 
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+		xve_warn(priv, "failed to init QP attr for RTS: %d", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+		xve_warn(priv, "failed to modify QP to RTS: %d", ret);
		return ret;
	}
 
@@ -914,7 +923,7 @@ static int xve_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
			skb->dev = p->netdev;
			if (dev_queue_xmit(skb)) {
				xve_warn(priv, "dev_queue_xmit failed ");
-				xve_warn(priv, "to requeue packet\n");
+				xve_warn(priv, "to requeue packet");
			} else {
				xve_dbg_data(priv, "%s Succefully sent skb\n",
					     __func__);
@@ -924,7 +933,7 @@ static int xve_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 
	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
-		xve_warn(priv, "failed to send RTU: %d\n", ret);
+		xve_warn(priv, "failed to send RTU: %d", ret);
		return ret;
	}
	return 0;
@@ -998,7 +1007,7 @@ static int xve_cm_modify_tx_init(struct net_device *dev,
	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey,
			   &qp_attr.pkey_index);
	if (ret) {
-		xve_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
+		xve_warn(priv, "pkey 0x%x not found: %d", priv->pkey, ret);
		return ret;
	}
 
@@ -1010,7 +1019,7 @@ static int xve_cm_modify_tx_init(struct net_device *dev,
 
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
-		xve_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
+		xve_warn(priv, "failed to modify tx QP to INIT: %d", ret);
		return ret;
	}
	return 0;
@@ -1023,7 +1032,7 @@ static int xve_cm_tx_init(struct xve_cm_ctx *p, struct ib_sa_path_rec *pathrec)
 
	p->tx_ring = vmalloc(priv->xve_sendq_size * sizeof(*p->tx_ring));
	if (IS_ERR(p->tx_ring)) {
-		xve_warn(priv, "failed to allocate tx ring\n");
+		xve_warn(priv, "failed to allocate tx ring");
		ret = -ENOMEM;
		goto err_tx;
	}
@@ -1032,34 +1041,31 @@ static int xve_cm_tx_init(struct xve_cm_ctx *p, struct ib_sa_path_rec *pathrec)
 
	p->qp = xve_cm_create_tx_qp(p->netdev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
-		xve_warn(priv, "failed to allocate tx qp: %d\n", ret);
+		xve_warn(priv, "failed to allocate tx qp: %d", ret);
		goto err_qp;
	}
 
	p->id = ib_create_cm_id(priv->ca, xve_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
-		xve_warn(priv, "failed to create tx cm id: %d\n", ret);
+		xve_warn(priv, "failed to create tx cm id: %d", ret);
		goto err_id;
	}
 
	ret = xve_cm_modify_tx_init(p->netdev, p->id, p->qp);
	if (ret) {
-		xve_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
+		xve_warn(priv, "failed to modify tx qp to rtr: %d", ret);
		goto err_modify;
	}
 
	ret = xve_cm_send_req(p->netdev, p->id, p->qp, pathrec);
	if (ret) {
-		xve_warn(priv, "failed to send cm req: %d\n", ret);
+		xve_warn(priv, "failed to send cm req: %d", ret);
		goto err_send_cm;
	}
 
-	xve_debug(DEBUG_CM_INFO, priv, "%s Request connection", __func__);
-	xve_debug(DEBUG_CM_INFO, priv, "0x%x for gid", p->qp->qp_num);
-	xve_debug(DEBUG_CM_INFO, priv, "%pI6 net_id 0x%x\n", pathrec->dgid.raw,
-		  priv->net_id);
-
+	pr_info("%s QP[%x] Tx Created path %pI6 ctx:%p\n", priv->xve_name,
+		p->qp->qp_num, pathrec->dgid.raw, p);
	return 0;
 
 err_send_cm:
@@ -1081,11 +1087,12 @@ static void xve_cm_tx_destroy(struct xve_cm_ctx *p)
	struct xve_cm_buf *tx_req;
	unsigned long begin;
	unsigned long flags = 0;
+	uint32_t qp_num = p->qp ? p->qp->qp_num : 0;
+
+	xve_debug(DEBUG_CM_INFO, priv,
+		  "QP[%x] ctx:%p Destroy active conn head[0x%x] tail[0x%x]",
+		  qp_num, p, p->tx_head, p->tx_tail);
 
-	xve_debug(DEBUG_CM_INFO, priv, "%s Destroy active conn", __func__);
-	xve_debug(DEBUG_CM_INFO, priv, "0x%x head", p->qp ? p->qp->qp_num : 0);
-	xve_debug(DEBUG_CM_INFO, priv, " 0x%x tail 0x%x\n", p->tx_head,
-		  p->tx_tail);
	if (p->id)
		ib_destroy_cm_id(p->id);
 
@@ -1115,7 +1122,6 @@ timeout:
 
	while ((int)p->tx_tail - (int)p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (priv->xve_sendq_size - 1)];
-
		++p->tx_tail;
		spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1134,9 +1140,8 @@ timeout:
	}
	spin_unlock_irqrestore(&priv->lock, flags);
 
-	pr_info("%s [xve %s] Destroyed active con", __func__, priv->xve_name);
-	pr_info("qp [0x%x] head", p->qp ? p->qp->qp_num : 0);
-	pr_info("0x%x tail 0x%x\n", p->tx_head, p->tx_tail);
+	xve_warn(priv, "QP[%x] Destroyed, head[0x%x] tail[0x%x]",
+		 qp_num, p->tx_head, p->tx_tail);
	if (p->qp)
		ib_destroy_qp(p->qp);
	if (p->tx_ring)
@@ -1145,7 +1150,8 @@ timeout:
	kfree(p);
 }
 
-static int xve_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int xve_cm_tx_handler(struct ib_cm_id *cm_id,
+			     struct ib_cm_event *event)
 {
	struct xve_cm_ctx *tx = cm_id->context;
	struct xve_dev_priv *priv;
@@ -1162,13 +1168,13 @@ static int xve_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	dev = priv->netdev;
	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
-		xve_debug(DEBUG_CM_INFO, priv, "%s DREQ received QP %x\n",
+		xve_debug(DEBUG_CM_INFO, priv, "%s DREQ received QP %x",
			  __func__, tx->qp ? tx->qp->qp_num : 0);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
-		xve_debug(DEBUG_CM_INFO, priv, "%s REP received QP %x\n",
+		xve_debug(DEBUG_CM_INFO, priv, "%s REP received QP %x",
			  __func__, tx->qp ? tx->qp->qp_num : 0);
		ret = xve_cm_rep_handler(cm_id, event);
		if (ret)
@@ -1178,7 +1184,7 @@ static int xve_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
-		pr_info("%s CM event %d [dev %s] QP %x\n", __func__,
+		pr_info("%s CM event %d [dev %s] QP %x", __func__,
			event->event, dev->name, tx->qp ? tx->qp->qp_num : 0);
		netif_tx_lock_bh(dev);
		/*
@@ -1327,7 +1333,7 @@ void xve_cm_stale_task(struct work_struct *work)
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE);
		if (ret)
-			xve_warn(priv, "unable to move qp to error state: %d\n",
+			xve_warn(priv, "unable to move qp to error state: %d",
				 ret);
		spin_lock_irq(&priv->lock);
	}
@@ -1398,7 +1404,7 @@ int xve_cm_dev_init(struct net_device *dev)
		    min_t(int, ALIGN((priv->admin_mtu + VLAN_ETH_HLEN),
				     PAGE_SIZE) / PAGE_SIZE, attr.max_srq_sge);
-		xve_debug(DEBUG_CM_INFO, priv, "%s max_srq_sge=%d\n", __func__,
+		xve_debug(DEBUG_CM_INFO, priv, "%s max_srq_sge=%d", __func__,
			  attr.max_srq_sge);
		xve_cm_create_srq(dev, attr.max_srq_sge);
@@ -1406,7 +1412,7 @@ int xve_cm_dev_init(struct net_device *dev)
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x20;
		priv->cm.num_frags = attr.max_srq_sge;
		xve_debug(DEBUG_CM_INFO, priv,
-			  "%s max_cm_mtu = 0x%x, num_frags=%d\n", __func__,
+			  "%s max_cm_mtu = 0x%x, num_frags=%d", __func__,
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		pr_notice("XVE: Non-SRQ mode not supported\n");
@@ -1422,17 +1428,14 @@ int xve_cm_dev_init(struct net_device *dev)
						       priv->cm.
						       srq_ring[i].mapping)) {
				xve_warn(priv,
-					 "%s failed to allocate rc ",
-					 __func__);
-				xve_warn(priv,
-					 "receive buffer %d\n", i);
+					 "%s failed to allocate rbuf rc%d",
+					 __func__, i);
				xve_cm_dev_cleanup(dev);
				return -ENOMEM;
			}
 
			if (xve_cm_post_receive_srq(dev, i)) {
-				xve_warn(priv, "xve_cm_post_receive_srq ");
-				xve_warn(priv, "failed for buf %d\n", i);
+				xve_warn(priv, "SRQ post failed buf:%d", i);
				xve_cm_dev_cleanup(dev);
				return -EIO;
			}
@@ -1450,11 +1453,11 @@ void xve_cm_dev_cleanup(struct net_device *dev)
	if (!priv->cm_supported || !priv->cm.srq)
		return;
 
-	xve_debug(DEBUG_CM_INFO, priv, "%s Cleanup xve CM\n", __func__);
+	xve_debug(DEBUG_CM_INFO, priv, "%s Cleanup xve CM", __func__);
 
	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
-		xve_warn(priv, "ib_destroy_srq failed: %d\n", ret);
+		xve_warn(priv, "ib_destroy_srq failed: %d", ret);
 
	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_ethtool.c b/drivers/infiniband/ulp/xsigo/xve/xve_ethtool.c
index 728c5d3284268..d3a9e996b1496 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_ethtool.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_ethtool.c
@@ -81,7 +81,8 @@ static int xve_set_coalesce(struct net_device *dev,
			       coal->rx_coalesce_usecs);
 
	if (ret) {
-		xve_warn(priv, "failed modifying CQ (%d)\n", ret);
+		xve_debug(DEBUG_INSTALL_INFO, priv,
+			  "failed modifying CQ (%d)\n", ret);
		return ret;
	}
 
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_ib.c b/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
index 09b822cc23da2..4954c84a45360 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
@@ -574,6 +574,9 @@ static int poll_rx(struct xve_dev_priv *priv, int num_polls, int *done,
	int n, i;
 
	n = ib_poll_cq(priv->recv_cq, num_polls, priv->ibwc);
+	if (n < 0)
+		xve_warn(priv, "%s ib_poll_cq() failed, rc %d",
+			 __func__, n);
	for (i = 0; i < n; ++i) {
		/*
		 * Convert any successful completions to flush
@@ -606,17 +609,19 @@ int xve_poll(struct napi_struct *napi, int budget)
 
	done = 0;
 
-	priv->counters[XVE_NAPI_POLL_COUNTER]++;
	/*
	 * If not connected complete it
	 */
	if (!(test_bit(XVE_OPER_UP, &priv->state) ||
	      test_bit(XVE_HBEAT_LOST, &priv->state))) {
+		priv->counters[XVE_NAPI_DROP_COUNTER]++;
		napi_complete(&priv->napi);
		clear_bit(XVE_INTR_ENABLED, &priv->state);
		return 0;
	}
 
+	priv->counters[XVE_NAPI_POLL_COUNTER]++;
+
 poll_more:
	while (done < budget) {
		int max = (budget - done);
@@ -625,6 +630,9 @@ poll_more:
 
		t = min(XVE_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
+		if (n < 0)
+			xve_warn(priv, "%s ib_poll_cq() failed, rc %d",
+				 __func__, n);
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;
@@ -789,7 +797,9 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
	void *phead;
	int ret = NETDEV_TX_OK;
	u8 packet_sent = 0;
+	int id;
 
+	id = priv->tx_head & (priv->xve_sendq_size - 1);
	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
@@ -798,10 +808,7 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
				 "%s linear data too small dropping %ld packets %s\n",
				 __func__, dev->stats.tx_dropped,
				 dev->name);
-			INC_TX_DROP_STATS(priv, dev);
-			xve_put_ah_refcnt(address);
-			dev_kfree_skb_any(skb);
-			return ret;
+			goto drop_pkt;
		}
	} else {
		int max_packet_len;
@@ -813,14 +820,10 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
		max_packet_len = priv->mcast_mtu + VLAN_ETH_HLEN;
		if (unlikely(skb->len > max_packet_len)) {
-			xve_warn(priv, "%s packet len %d", __func__, skb->len);
-			xve_warn(priv, "(> %d) too long to", max_packet_len);
-			xve_warn(priv, "send,dropping %ld packets %s\n",
-				 dev->stats.tx_dropped, dev->name);
-			INC_TX_DROP_STATS(priv, dev);
-			xve_put_ah_refcnt(address);
-			dev_kfree_skb_any(skb);
-			return ret;
+			xve_info(priv,
+				 "packet len %d (>%d) too long dropping",
+				 skb->len, max_packet_len);
+			goto drop_pkt;
		}
		phead = NULL;
		hlen = 0;
@@ -836,7 +839,7 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
-	tx_req = &priv->tx_ring[priv->tx_head & (priv->xve_sendq_size - 1)];
+	tx_req = &priv->tx_ring[id];
	tx_req->skb = skb;
	tx_req->ah = address;
	if (unlikely(xve_dma_map_tx(priv->ca, tx_req))) {
@@ -850,8 +853,8 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
	/* Queue almost full */
	if (++priv->tx_outstanding == priv->xve_sendq_size) {
		xve_dbg_data(priv,
-			     "%s stop queue head%d out%d tail%d type%d",
-			     __func__, priv->tx_head, priv->tx_tail,
+			     "%s stop queue id%d head%d tail%d out%d type%d",
+			     __func__, id, priv->tx_head, priv->tx_tail,
			     priv->tx_outstanding, type);
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			xve_warn(priv, "%s Req notify on send CQ failed\n",
@@ -875,10 +878,10 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
	if (unlikely(post_send(priv, priv->tx_head &
			       (priv->xve_sendq_size - 1),
			       address->ah, qpn, tx_req, phead, hlen))) {
-		xve_warn(priv, "%s post_send failed head%d tail%d out%d type%d\n",
-			 __func__, priv->tx_head, priv->tx_tail,
-			 priv->tx_outstanding, type);
		--priv->tx_outstanding;
+		xve_warn(priv, "%s post_send failed id%d head%d tail%d out%d type%d",
+			 __func__, id, priv->tx_head, priv->tx_tail,
+			 priv->tx_outstanding, type);
		priv->counters[XVE_TX_RING_FULL_COUNTER]++;
		xve_put_ah_refcnt(address);
		xve_free_txbuf_memory(priv, tx_req);
@@ -900,6 +903,12 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
	if (packet_sent)
		priv->counters[XVE_TX_COUNTER]++;
	return ret;
+
+drop_pkt:
+	INC_TX_DROP_STATS(priv, dev);
+	xve_put_ah_refcnt(address);
+	dev_kfree_skb_any(skb);
+	return ret;
 }
 
 static void __xve_reap_ah(struct net_device *dev)
@@ -1171,7 +1180,7 @@ int xve_ib_dev_stop(struct net_device *dev, int flush)
	xve_debug(DEBUG_IBDEV_INFO, priv, "%s All sends and receives done\n",
		  __func__);
 timeout:
-	xve_warn(priv, "Deleting TX timer");
+	xve_debug(DEBUG_IBDEV_INFO, priv, "Deleting TX timer\n");
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_main.c b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
index dad67fff015cb..2eebf570c9e45 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_main.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
@@ -182,7 +182,7 @@ int xve_open(struct net_device *netdev)
	struct xve_dev_priv *priv = netdev_priv(netdev);
	unsigned long flags = 0;
 
-	pr_info("XVE: %s Bringing interface up %s\n", __func__, priv->xve_name);
+	xve_info(priv, "Bringing interface up");
	priv->counters[XVE_OPEN_COUNTER]++;
 
	spin_lock_irqsave(&priv->lock, flags);
@@ -225,7 +225,7 @@ static int xve_stop(struct net_device *netdev)
	struct xve_dev_priv *priv = netdev_priv(netdev);
	unsigned long flags = 0;
 
-	pr_info("XVE: %s Stopping interface %s\n", __func__, priv->xve_name);
+	xve_info(priv, "Stopping interface");
 
	spin_lock_irqsave(&priv->lock, flags);
	clear_bit(XVE_FLAG_ADMIN_UP, &priv->flags);
@@ -239,8 +239,9 @@ static int xve_stop(struct net_device *netdev)
		xve_xsmp_send_oper_state(priv, priv->resource_id,
					 XSMP_XVE_OPER_DOWN);
 
-	pr_info("XVE: %s Finished Stopping interface %s\n", __func__,
-		priv->xve_name);
+	xve_debug(DEBUG_IBDEV_INFO, priv,
+		  "%s Stopped interface %s\n", __func__,
+		  priv->xve_name);
	return 0;
 }
 
@@ -248,8 +249,8 @@ int xve_modify_mtu(struct net_device *netdev, int new_mtu)
 {
	struct xve_dev_priv *priv = netdev_priv(netdev);
 
-	pr_info("XVE: %s changing mtu from %d to %d\n",
-		priv->xve_name, priv->admin_mtu, new_mtu);
+	xve_info(priv, "changing mtu from %d to %d",
+		 priv->admin_mtu, new_mtu);
	if (new_mtu == netdev->mtu)
		return 0;
 
@@ -645,6 +646,7 @@ static void path_rec_completion(int status,
		while ((skb = __skb_dequeue(&uplink_skqueue))) {
			skb->dev = dev;
			xve_get_ah_refcnt(path->ah);
+			priv->counters[XVE_PATHREC_GW_COUNTER]++;
			/* Sending the queued GATEWAY Packet */
			ret = xve_send(dev, skb, path->ah, priv->gw.t_data_qp, 2);
			if (ret == NETDEV_TX_BUSY) {
@@ -1193,8 +1195,7 @@ static void xve_io_disconnect(struct xve_dev_priv *priv)
		spin_unlock_irqrestore(&priv->lock, flags);
		if (test_bit(XVE_OS_ADMIN_UP, &priv->state))
			napi_synchronize(&priv->napi);
-		pr_info("%s Flushing mcast [xve :%s]\n", __func__,
-			priv->xve_name);
+		xve_info(priv, "%s Flushing mcast", __func__);
		xve_queue_work(priv, XVE_WQ_START_FLUSHNORMAL);
	} else {
		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1538,7 +1539,8 @@ static int xve_xsmp_send_notification(struct xve_dev_priv *priv, u64 vid,
	xsmp_msg = (struct xve_xsmp_msg *)(msg + sizeof(*header));
	if (notifycmd == XSMP_XVE_OPER_UP) {
-		pr_info("XVE: %s sending updated mtu for %s[mtu %d]\n",
+		xve_debug(DEBUG_INSTALL_INFO, priv,
+			  "XVE: %s sending updated mtu for %s[mtu %d]\n",
			__func__, priv->xve_name, priv->admin_mtu);
		xsmp_msg->vn_mtu = cpu_to_be16(priv->admin_mtu);
		xsmp_msg->net_id = cpu_to_be32(priv->net_id);
@@ -1597,7 +1599,7 @@ static int xve_state_machine(struct xve_dev_priv *priv)
			    XVE_HBEAT_LOSS_THRES*priv->hb_interval)) {
			unsigned long flags = 0;
 
-			xve_warn(priv, "Heart Beat Loss: %lu:%lu\n",
+			xve_info(priv, "Heart Beat Loss: %lu:%lu\n",
				 jiffies, (unsigned long)priv->last_hbeat +
				 3*priv->hb_interval*HZ);
 
@@ -1743,23 +1745,17 @@ xve_set_edr_features(struct xve_dev_priv *priv)
	priv->netdev->hw_features =
		NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_GRO;
 
-	pr_info("XVE: %s %s flags[%x]\n",
-		__func__, priv->xve_name, priv->hca_caps);
+	xve_info(priv, "%s HCA capability flags[%x]",
+		 __func__, priv->hca_caps);
 
	if (xve_enable_offload & (priv->is_eoib && priv->is_titan)) {
		if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
-			pr_info("XVE: %s Setting checksum offload %s[%x]\n",
-				__func__, priv->xve_name, priv->hca_caps);
			set_bit(XVE_FLAG_CSUM, &priv->flags);
			priv->netdev->hw_features |=
				NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
		}
-		if (priv->hca_caps & IB_DEVICE_UD_TSO) {
-			pr_info("XVE: %s Setting TSO offload %s[%x]\n",
-				__func__, priv->xve_name, priv->hca_caps);
+		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->netdev->hw_features |= NETIF_F_TSO;
-		}
-
	}
 
	priv->netdev->features |= priv->netdev->hw_features;
@@ -1777,14 +1773,10 @@ int xve_set_dev_features(struct xve_dev_priv *priv, struct ib_device *hca)
	priv->lro_mode = 1;
 
	if (priv->vnet_mode == XVE_VNET_MODE_RC) {
-		pr_info("XVE: %s Setting RC mode for %s\n", __func__,
-			priv->xve_name);
		strcpy(priv->mode, "connected(RC)");
		set_bit(XVE_FLAG_ADMIN_CM, &priv->flags);
		priv->cm_supported = 1;
	} else {/* UD */
-		pr_info("XVE: %s Setting UD mode for %s\n", __func__,
-			priv->xve_name);
		strcpy(priv->mode, "datagram(UD)");
 
		/* MTU will be reset when mcast join happens */
@@ -1793,6 +1785,8 @@ int xve_set_dev_features(struct xve_dev_priv *priv, struct ib_device *hca)
			priv->netdev->mtu = XVE_UD_MTU(priv->max_ib_mtu);
		priv->lro_mode = 0;
	}
+	xve_info(priv, "%s Mode:%d MTU:%d", __func__,
+		 priv->vnet_mode, priv->netdev->mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->netdev->mtu;
 
	xg_setup_pseudo_device(priv->netdev, hca);
@@ -1849,7 +1843,7 @@ void xve_remove_one(struct xve_dev_priv *priv)
 
	int count = 0;
 
-	pr_info("XVE:%s Removing xve interface %s\n", __func__, priv->xve_name);
+	xve_info(priv, "%s Removing xve interface", __func__);
	ib_unregister_event_handler(&priv->event_handler);
	cancel_delayed_work_sync(&priv->stale_task);
	rtnl_lock();
@@ -1857,15 +1851,13 @@ void xve_remove_one(struct xve_dev_priv *priv)
	rtnl_unlock();
	vmk_notify_uplink(priv->netdev);
	unregister_netdev(priv->netdev);
-	pr_info("XVE:%s Unregistered xve interface %s\n", __func__,
-		priv->xve_name);
+	xve_info(priv, "%s Unregistered xve interface ", __func__);
 
	/* Wait for reference count to go zero */
	while (atomic_read(&priv->ref_cnt) && xve_continue_unload()) {
		count++;
		if (count > 20) {
-			pr_info("%s: Waiting for refcnt to become", __func__);
-			pr_info("zero [xve: %s] %d\n",
-				priv->xve_name, atomic_read(&priv->ref_cnt));
+			xve_info(priv, "Waiting for refcnt to become zero %d",
+				 atomic_read(&priv->ref_cnt));
			count = 0;
		}
		msleep(1000);
@@ -1979,7 +1971,8 @@ int xve_xsmp_send_oper_state(struct xve_dev_priv *priv, u64 vid, int state)
	int ret;
	char *str = state == XSMP_XVE_OPER_UP ? "UP" : "DOWN";
 
-	pr_info("XVE: %s Sending OPER state [%d:%s] to %s\n",
+	xve_debug(DEBUG_INSTALL_INFO, priv,
+		  "XVE: %s Sending OPER state [%d:%s] to %s\n",
		__func__, state, str, priv->xve_name);
	if (state == XSMP_XVE_OPER_UP) {
		set_bit(XVE_OPER_REP_SENT, &priv->state);
@@ -2091,9 +2084,12 @@ static int xve_xsmp_send_ack(struct xve_dev_priv *priv,
			xmsgp->tca_qkey = cpu_to_be16(priv->gw.t_qkey);
		}
	}
-	pr_info("XVE: %s ACK back with admin mtu ", __func__);
-	pr_info("%d for %s", xmsgp->vn_mtu, priv->xve_name);
-	pr_info("[netid %d ]\n", xmsgp->net_id);
+	xve_debug(DEBUG_INSTALL_INFO, priv,
+		  "XVE: %s ACK back with admin mtu ", __func__);
+	xve_debug(DEBUG_INSTALL_INFO, priv,
+		  "%d for %s", xmsgp->vn_mtu, priv->xve_name);
+	xve_debug(DEBUG_INSTALL_INFO, priv,
+		  "[netid %d ]\n", xmsgp->net_id);
 
	memcpy(msg + sizeof(*m_header), xmsgp, sizeof(*xmsgp));
 
@@ -2173,8 +2169,8 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
		/*
		 * Duplicate VID, send ACK, send oper state update
		 */
-		XSMP_ERROR
-		    ("%s: Duplicate XVE install message name: %s, VID=0x%llx\n",
+		xve_debug(DEBUG_INSTALL_INFO, priv,
+			  "%s: Duplicate XVE install message name: %s, VID=0x%llx\n",
		     __func__, xmsgp->xve_name,
		     be64_to_cpu(xmsgp->resource_id));
		ret = -EEXIST;
@@ -2220,10 +2216,6 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
	}
 
	priv = netdev_priv(netdev);
-	pr_info("XVE: %s Installing xve %s - ", __func__, xmsgp->xve_name);
-	pr_info("resource id %llx", be64_to_cpu(xmsgp->resource_id));
-	pr_info("priv DS %p\n", priv);
-
	xcpm_get_xsmp_session_info(xsmp_hndl, &priv->xsmp_info);
	hca = priv->xsmp_info.ib_device;
	port = xscore_port_num(priv->xsmp_info.port);
@@ -2232,7 +2224,6 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
	    (xmsgp->vnet_mode);
	priv->net_id = be32_to_cpu(xmsgp->net_id);
	priv->netdev->mtu = be16_to_cpu(xmsgp->vn_mtu);
-	pr_info("XVE: %s MTU %d - ", __func__, priv->netdev->mtu);
	priv->resource_id = be64_to_cpu(xmsgp->resource_id);
	priv->mp_flag = be16_to_cpu(xmsgp->mp_flag);
	priv->install_flag = be32_to_cpu(xmsgp->install_flag);
@@ -2250,6 +2241,9 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
	priv->is_titan = (is_titan) ? 1 : 0;
	priv->is_jumbo = (is_jumbo) ? 1 : 0;
 
+	pr_info("Install VNIC:%s rID:%llx pDS:%p NetId:%d",
+		xmsgp->xve_name, be64_to_cpu(xmsgp->resource_id),
+		priv, priv->net_id);
	/* Make Send and Recv Queue parmaters Per Vnic */
	if (!(priv->vnet_mode & XVE_VNET_MODE_UD)) {
		priv->xve_sendq_size = xve_sendq_size;
@@ -2309,14 +2303,6 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
		goto device_init_failed;
	}
 
-	pr_info("XVE: %s adding vnic %s ",
-		__func__, priv->xve_name);
-	pr_info("net_id %d vnet_mode %d type%d eoib[%s]",
-		priv->net_id, priv->vnet_mode, priv->vnic_type,
-		priv->is_eoib ? "Yes" : "no");
-	pr_info("port %d net_id_be %d\n", port, net_id_be);
-	pr_info("MTU port%d active%d\n", priv->port_attr.max_mtu,
-		priv->port_attr.active_mtu);
	memcpy(priv->bcast_mgid.raw, bcast_mgid, sizeof(union ib_gid));
 
	if (xve_is_edr(priv)) {
@@ -2349,7 +2335,6 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
		priv->pkey |= 0x8000;
	}
 
-	pr_info("MGID: %pI6 pkey%d\n", &priv->bcast_mgid.raw, priv->pkey);
	if (xve_set_dev_features(priv, hca))
		goto device_init_failed;
 
@@ -2420,8 +2405,13 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
 
	queue_sm_work(priv, 0);
 
-	pr_info("%s Successfully created xve [%s]\n", __func__,
-		xmsgp->xve_name);
+	pr_info("%s Install Success: vnet_mode:%d type:%d eoib[%s] HPort:%d\n",
+		priv->xve_name, priv->vnet_mode, priv->vnic_type,
+		priv->is_eoib ? "Yes" : "no", port);
+	pr_info("VNIC:%s MTU[%d:%d:%d] MGID:%pI6 pkey:%d\n", priv->xve_name,
+		priv->netdev->mtu, priv->port_attr.max_mtu,
+		priv->port_attr.active_mtu,
+		&priv->bcast_mgid.raw, priv->pkey);
 
 send_ack:
	ret = xve_xsmp_send_ack(priv, xmsgp);
@@ -2431,9 +2421,8 @@ send_ack:
			  be64_to_cpu(xmsgp->resource_id));
	}
	if (update_state && priv->vnic_type == XSMP_XCM_OVN) {
-		printk
-		    ("XVE: %s Sending Oper state to chassis for %s id %llx\n",
-		     __func__, priv->xve_name, priv->resource_id);
+		xve_info(priv, "Sending Oper state to chassis for id %llx\n",
+			 priv->resource_id);
		(void)xve_xsmp_handle_oper_req(priv->xsmp_hndl,
					       priv->resource_id);
	}
@@ -2599,9 +2588,11 @@ xve_xsmp_vnic_ready(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
			__func__, xmsgp->xve_name);
		return -1;
	}
-	pr_info("XVE VNIC_READY: vnic_type: %u, subnet_prefix: %llx\n",
+	xve_debug(DEBUG_INSTALL_INFO, priv,
+		  "XVE VNIC_READY: vnic_type: %u, subnet_prefix: %llx\n",
		priv->vnic_type, priv->gw.t_gid.global.subnet_prefix);
-	pr_info("TCA ctrl_qp: %u, data_qp: %u, pkey: %x, qkey: %x\n",
+	xve_debug(DEBUG_INSTALL_INFO, priv,
+		  "TCA ctrl_qp: %u, data_qp: %u, pkey: %x, qkey: %x\n",
		priv->gw.t_ctrl_qp, priv->gw.t_data_qp,
		priv->gw.t_pkey, priv->gw.t_qkey);
 
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_multicast.c b/drivers/infiniband/ulp/xsigo/xve/xve_multicast.c
index 917ceafe74db3..8b4fa113f2f98 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_multicast.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_multicast.c
@@ -176,7 +176,7 @@ static int xve_mcast_join_finish(struct xve_mcast *mcast,
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey =
		    (priv->is_eoib == 1) ? priv->port_qkey : priv->qkey;
-		xve_warn(priv, "setting remote_qkey %x",
+		xve_dbg_mcast(priv, "setting remote_qkey %x",
			 priv->tx_wr.wr.ud.remote_qkey);
		set_qkey = 1;
 
@@ -580,8 +580,8 @@ void xve_mcast_join_task(struct work_struct *work)
	spin_unlock_irq(&priv->lock);
 
	if (!xve_cm_admin_enabled(dev)) {
-		printk
-		    ("XVE: %s xve %s dev mtu %d, admin_mtu %d, mcast_mtu %d\n",
+		xve_info(priv,
+			 "XVE: %s xve %s dev mtu %d, admin_mtu %d, mcast_mtu %d\n",
		     __func__, priv->xve_name, priv->netdev->mtu,
		     priv->admin_mtu, priv->mcast_mtu);
		if (!priv->is_jumbo)
@@ -701,8 +701,8 @@ int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb,
 
		mcast = xve_mcast_alloc(dev, 0);
		if (!mcast) {
-			xve_warn(priv, "unable to allocate memory for ");
-			xve_warn(priv, "multicast structure\n");
+			xve_warn(priv,
+				 "%s unable to allocate memory", __func__);
			INC_TX_DROP_STATS(priv, dev);
			dev_kfree_skb_any(skb);
			goto out;
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
index c3fcb0c951b44..ff67fd07f2c94 100755
--- a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
@@ -64,6 +64,7 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
	"state_machine_down count:\t",
	"state_machine_ibclear count:\t",
	"napi_poll_count:\t\t",
+	"napi_drop_count:\t\t",
	"short_tx_pkt_count:\t\t",
	"tx_skb_count:\t\t\t",
	"tx skb free count:\t\t",
@@ -143,6 +144,7 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
	"pathrec query count:\t\t",
	"pathrec resp count:\t\t",
	"pathrec resp err count:\t\t",
+	"pathrec gw packet count:\t\t",
	"ib sm_change count:\t\t",
	"ib client_reregister count:\t",
	"ib port_err count:\t\t",
@@ -474,7 +476,7 @@ static int xve_proc_read_device(struct seq_file *m, void *data)
	seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
	seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
	seq_printf(m, "Receive CQ size: \t\t%d\n", vp->xve_rcq_size);
-	seq_printf(m, "Transmit CQ size: \t\t%d\n", vp->xve_scq_size);
+	seq_printf(m, "TX CQ size:\t\t\t%d\n", vp->xve_scq_size);
 
	if (vp->cm_supported) {
		seq_printf(m, "Num of cm frags: \t\t%d\n", vp->cm.num_frags);
@@ -589,6 +591,11 @@ static int xve_proc_read_device(struct seq_file *m, void *data)
	else
		strcat(tmp_buf, " + IB Device Not Opened");
 
+	if (test_bit(XVE_HBEAT_LOST, &vp->state))
+		strcat(tmp_buf, " + HeartBeat Lost");
+	else
+		strcat(tmp_buf, " + HeartBeat Active");
+
	if (test_bit(XVE_OVER_QUOTA, &vp->state))
		strcat(tmp_buf, " + No RX Quota");
 
@@ -597,6 +604,11 @@ static int xve_proc_read_device(struct seq_file *m, void *data)
	if (vp->work_queue_failed != 0)
		seq_printf(m, "WQ Failed:\t\t\t%ld\n", vp->work_queue_failed);
 
+	seq_printf(m, "TX Net queue \t\t%s %d:%d\n",
+		   netif_queue_stopped(vp->netdev) ? "stopped" : "active",
+		   vp->counters[XVE_TX_WAKE_UP_COUNTER],
+		   vp->counters[XVE_TX_QUEUE_STOP_COUNTER]);
+
	seq_printf(m, "Counters cleared count:\t\t%u\n", vp->counters_cleared);
 
	if (xve_is_uplink(vp)) {
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_tables.c b/drivers/infiniband/ulp/xsigo/xve/xve_tables.c
index bf6a1eed4f8ef..37e5db4896ee4 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_tables.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_tables.c
@@ -168,10 +168,9 @@ int xve_aging_task_machine(struct xve_dev_priv *priv)
			    (XVE_FWT_ENTRY_REFRESH, &fwt_entry->state)
			    && ((jiffies - fwt_entry->last_refresh) >=
				priv->aging_delay)) {
-				pr_info("XVE: %s MAC ", priv->xve_name);
-				pr_info("%pM", smac);
-				pr_info(" vlan %d Aged out\n",
-					fwt_entry->vlan);
+				xve_info(priv,
+					 "MAC %pM vlan %d Aged out",
+					 smac, fwt_entry->vlan);
				/*
				 * Can there be a race here where path
				 * becomes a bad address when paths
@@ -259,11 +258,12 @@ void xve_fwt_insert(struct xve_dev_priv *priv, struct xve_cm_ctx *ctx,
			    (fwt_entry->dgid.raw, gid->raw,
			     sizeof(union ib_gid)))) {
				print_mgid_buf(from, (char *)fwt_entry->dgid.raw);
				print_mgid_buf(to, (char *)gid->raw);
-				pr_info("XVE: %s MAC %pM ",
-					priv->xve_name, smac);
-				pr_info(" vlan %d moved from GID %s to GID %s\n",
-					fwt_entry->vlan, from, to);
-
+				xve_debug(DEBUG_FWTABLE_INFO, priv,
+					  "XVE: %s MAC %pM ",
+					  priv->xve_name, smac);
+				xve_debug(DEBUG_FWTABLE_INFO, priv,
+					  "vlan %d moved from GID %s to GID %s\n",
+					  fwt_entry->vlan, from, to);
				priv->counters[XVE_MAC_MOVED_COUNTER]++;
 
				memcpy(fwt_entry->dgid.raw, gid->raw,
@@ -300,8 +300,10 @@ void xve_fwt_insert(struct xve_dev_priv *priv, struct xve_cm_ctx *ctx,
	}
	memset(fwt_entry, 0, sizeof(struct xve_fwt_entry));
	print_mgid_buf(from, (char *)gid->raw);
-	pr_info("XVE: %s MAC %pM", priv->xve_name, smac);
-	pr_info("vlan %d learned from GID %s, mode: %s QPN %x Fwt %p\n",
+	xve_debug(DEBUG_FWTABLE_INFO, priv,
+		  "XVE: %s MAC %pM", priv->xve_name, smac);
+	xve_debug(DEBUG_FWTABLE_INFO, priv,
+		  "vlan %d learned from GID %s, mode: %s QPN %x Fwt %p\n",
		vlan, from, qpn ? "UD" : "RC", qpn, fwt_entry);
	priv->counters[XVE_MAC_LEARN_COUNTER]++;
	memcpy(fwt_entry->dgid.raw, gid->raw, sizeof(union ib_gid));
@@ -388,9 +390,8 @@ void xve_fwt_cleanup(struct xve_dev_priv *priv)
			xve_fwt->num--;
		}
	}
-	pr_info("XVE: %s Forwarding table cleaned up for %s",
-		__func__, priv->xve_name);
-	pr_info("number of entries %d\n", xve_fwt->num);
+	xve_info(priv, "Forwarding table cleaned up entries:%d",
+		 xve_fwt->num);
	spin_unlock_irqrestore(&xve_fwt->lock, flags);
 }
 
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c b/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
index 4ae7cb1824f15..aa311508970b3 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
@@ -178,10 +178,10 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
	}
 
	/* Bug 24673784 */
-	if (priv->is_titan && xve_use_hugecq) {
+	if (priv->is_titan && xve_use_hugecq)
		priv->xve_rcq_size = priv->xve_scq_size = xve_use_hugecq;
-	} else {
+	else {
		size = priv->xve_sendq_size;
		size += priv->xve_recvq_size + 1; /* 1 extra for rx_drain_qp */
		priv->xve_rcq_size = size;