From: Pradeep Gopanapalli
Date: Tue, 1 Nov 2016 19:36:41 +0000 (+0000)
Subject: xsigo: Resize uVNIC/PVI CQ size
X-Git-Tag: v4.1.12-92~45^2~4
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=5bf5832177a5be098cae5344b9991f26d3d472ff;p=users%2Fjedix%2Flinux-maple.git

xsigo: Resize uVNIC/PVI CQ size

Orabug: 24765034

uVNIC/PVI should avoid the CQ overflow condition. Resize the CQs to
16k to handle multiple connections being flushed simultaneously per
path.

Increase the send queue and receive queue sizes to 2k for better
performance.

Add proc output to print the CQ sizes.

Add stats to count RC completions.

Reported-by: scarlett chen
Signed-off-by: Pradeep Gopanapalli
Reviewed-by: Aravind Kini
Reviewed-by: viswa krishnamurthy
Reviewed-by: Manish Kumar Singh
Reviewed-by: UmaShankar Tumari Mahabalagiri
---

diff --git a/drivers/infiniband/ulp/xsigo/xve/xve.h b/drivers/infiniband/ulp/xsigo/xve/xve.h
index 1be1cd700641..038be751dd03 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve.h
+++ b/drivers/infiniband/ulp/xsigo/xve/xve.h
@@ -159,8 +159,8 @@ enum {
 	XVE_CM_BUF_SIZE = XVE_CM_MTU + VLAN_ETH_HLEN,
 	XVE_CM_HEAD_SIZE = XVE_CM_BUF_SIZE % PAGE_SIZE,
 	XVE_CM_RX_SG = ALIGN(XVE_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
-	XVE_RX_RING_SIZE = 256,
-	XVE_TX_RING_SIZE = 128,
+	XVE_RX_RING_SIZE = 2048,
+	XVE_TX_RING_SIZE = 2048,
 	XVE_MAX_QUEUE_SIZE = 8192,
 	XVE_MIN_QUEUE_SIZE = 2,
 	XVE_CM_MAX_CONN_QP = 4096,
@@ -292,6 +292,8 @@ enum {
 	XVE_TX_UD_COUNTER,
 	XVE_TX_RC_COUNTER,
+	XVE_RC_RXCOMPL_COUNTER,
+	XVE_RC_TXCOMPL_COUNTER,
 	XVE_TX_MCAST_PKT,
 	XVE_TX_BCAST_PKT,
 	XVE_TX_MCAST_ARP_QUERY,
@@ -322,6 +324,7 @@ enum {
 	XVE_HBEAT_COUNTER,
 	XVE_LINK_STATUS_COUNTER,
 	XVE_RX_NOGRH,
+	XVE_DUP_VID_COUNTER,
 	XVE_MAX_COUNTERS
 };
 
@@ -750,6 +753,8 @@ struct xve_dev_priv {
 	/* TX and RX Ring attributes */
 	int xve_recvq_size;
 	int xve_sendq_size;
+	int xve_rcq_size;
+	int xve_scq_size;
 	int xve_max_send_cqe;
 	struct xve_rx_buf *rx_ring;
 	struct xve_tx_buf *tx_ring;
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_cm.c b/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
index b6a6d1c0e328..d3853d039e00 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_cm.c
@@ -620,6 +620,7 @@ copied:
 		skb->data[10], skb->data[11], skb->protocol);
 
 	update_cm_rx_rate(p, skb->len);
+	priv->counters[XVE_RC_RXCOMPL_COUNTER]++;
 	xve_send_skb(priv, skb);
 repost:
 	if (unlikely(xve_cm_post_receive_srq(dev, wr_id))) {
@@ -745,6 +746,7 @@ void xve_cm_handle_tx_wc(struct net_device *dev,
 	netif_tx_lock(dev);
 
 	++tx->tx_tail;
+	priv->counters[XVE_RC_TXCOMPL_COUNTER]++;
 	if (unlikely(--priv->tx_outstanding == priv->xve_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(XVE_FLAG_ADMIN_UP, &priv->flags)) {
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_main.c b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
index 53f93f8718ea..dd8dfc48c125 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_main.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
@@ -2148,6 +2148,7 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
 		ret = -EEXIST;
 		update_state = 1;
 		priv->xsmp_hndl = xsmp_hndl;
+		priv->counters[XVE_DUP_VID_COUNTER]++;
 		goto send_ack;
 	}
 
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
index 77f833576747..c3fcb0c951b4 100755
--- a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
@@ -126,6 +126,8 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
 	"mcast detach count:\t\t",
 	"tx ud count:\t\t\t",
 	"tx rc count:\t\t\t",
+	"rc rx compl count:\t\t\t",
+	"rc tx compl count:\t\t\t",
 	"tx mcast count:\t\t\t",
 	"tx broadcast count:\t\t\t",
 	"tx arp count:\t\t\t",
@@ -152,6 +154,7 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
 	"Heartbeat Count(0x8919):\t\t",
 	"Link State message count:\t",
 	"RX frames without GRH\t\t",
+	"Duplicate xve install count:\t"
 };
 
 static char *misc_counter_name[XVE_MISC_MAX_COUNTERS] = {
@@ -470,11 +473,13 @@ static int xve_proc_read_device(struct seq_file *m, void *data)
 	seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
 	seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
-	seq_printf(m, "Completion Queue size: \t\t%d\n", vp->xve_max_send_cqe);
+	seq_printf(m, "Receive CQ size: \t\t%d\n", vp->xve_rcq_size);
+	seq_printf(m, "Transmit CQ size: \t\t%d\n", vp->xve_scq_size);
 
 	if (vp->cm_supported) {
 		seq_printf(m, "Num of cm frags: \t\t%d\n", vp->cm.num_frags);
 		seq_printf(m, "CM mtu \t\t\t%d\n", vp->cm.max_cm_mtu);
+		seq_printf(m, "CM SRQ \t\t\t%s\n", (vp->cm.srq) ? "yes" : "no");
 	}
 
 	seq_puts(m, "\n");
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c b/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
index 8851c0a037aa..4ae7cb1824f1 100644
--- a/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
+++ b/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
@@ -36,6 +36,9 @@
 static int xve_max_inline_data = 128;
 module_param(xve_max_inline_data, int, 0644);
 
+static int xve_use_hugecq = 16384;
+module_param(xve_use_hugecq, int, 0644);
+
 int xve_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid,
 		     int set_qkey)
 {
@@ -151,7 +154,7 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 		.qp_type = IB_QPT_UD
 	};
 	struct ethtool_coalesce *coal;
-	int ret, size, max_sge = MAX_SKB_FRAGS + 1;
+	int ret, size = 0, max_sge = MAX_SKB_FRAGS + 1;
 	int i;
 
 	priv->pd = ib_alloc_pd(priv->ca);
@@ -167,7 +170,6 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 		goto out_free_pd;
 	}
 
-	size = priv->xve_recvq_size + 1;
 	ret = xve_cm_dev_init(dev);
 	if (ret != 0) {
 		pr_err("%s Failed for %s [ret %d ]\n", __func__,
@@ -175,24 +177,32 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 		goto out_free_mr;
 	}
 
-	size += priv->xve_sendq_size;
-	size = priv->xve_recvq_size + 1; /* 1 extra for rx_drain_qp */
+	/* Bug 24673784 */
+	if (priv->is_titan && xve_use_hugecq) {
+		priv->xve_rcq_size = priv->xve_scq_size =
+			xve_use_hugecq;
+	} else {
+		size = priv->xve_sendq_size;
+		size += priv->xve_recvq_size + 1; /* 1 extra for rx_drain_qp */
+		priv->xve_rcq_size = size;
+		priv->xve_scq_size = priv->xve_sendq_size;
+	}
 
 	/* Create Receive CompletionQueue */
-	priv->recv_cq =
-	    ib_create_cq(priv->ca, xve_ib_completion, NULL, dev, size, 0);
+	priv->recv_cq = ib_create_cq(priv->ca, xve_ib_completion, NULL,
+				     dev, priv->xve_rcq_size, 0);
 	if (IS_ERR(priv->recv_cq)) {
-		pr_warn("%s: failed to create receive CQ for %s\n",
-			ca->name, priv->xve_name);
+		pr_warn("%s: failed to create receive CQ for %s size %d\n",
+			ca->name, priv->xve_name, priv->xve_rcq_size);
 		goto out_free_mr;
 	}
 
 	/* Create Send CompletionQueue */
 	priv->send_cq = ib_create_cq(priv->ca, xve_send_comp_handler, NULL,
-				     dev, priv->xve_sendq_size, 0);
+				     dev, priv->xve_scq_size, 0);
 	if (IS_ERR(priv->send_cq)) {
-		pr_warn("%s: failed to create send CQ for %s\n",
-			ca->name, priv->xve_name);
+		pr_warn("%s: failed to create send CQ for %s size %d\n",
+			ca->name, priv->xve_name, priv->xve_scq_size);
 		goto out_free_recv_cq;
 	}
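
For reference, the stand-alone sketch below (not part of the patch) reproduces only the CQ sizing arithmetic from xve_transport_dev_init() above; the is_titan flag, the constants, and the main() driver are illustrative stand-ins and do not build against the kernel tree.

/* Stand-alone sketch of the CQ sizing logic added in xve_transport_dev_init().
 * XVE_TX_RING_SIZE/XVE_RX_RING_SIZE mirror the new 2k defaults; is_titan and
 * use_hugecq are stand-ins for priv->is_titan and the xve_use_hugecq module
 * parameter. This is an illustration, not kernel code.
 */
#include <stdio.h>

#define XVE_TX_RING_SIZE 2048	/* xve_sendq_size default after this patch */
#define XVE_RX_RING_SIZE 2048	/* xve_recvq_size default after this patch */

static void cq_sizes(int is_titan, int use_hugecq, int *rcq, int *scq)
{
	if (is_titan && use_hugecq) {
		/* Titan adapters: one huge size for both send and receive CQ */
		*rcq = *scq = use_hugecq;
	} else {
		/* Legacy sizing: receive CQ covers sendq + recvq + 1
		 * (the extra entry is for rx_drain_qp); send CQ covers
		 * only the send queue. */
		*rcq = XVE_TX_RING_SIZE + XVE_RX_RING_SIZE + 1;
		*scq = XVE_TX_RING_SIZE;
	}
}

int main(void)
{
	int rcq, scq;

	cq_sizes(1, 16384, &rcq, &scq);	/* Titan, default xve_use_hugecq */
	printf("titan:  rcq=%d scq=%d\n", rcq, scq);

	cq_sizes(0, 16384, &rcq, &scq);	/* non-Titan falls back to queue-based sizing */
	printf("legacy: rcq=%d scq=%d\n", rcq, scq);
	return 0;
}

With the defaults introduced by this change (2k rings, xve_use_hugecq = 16384), it prints rcq=16384 scq=16384 for Titan adapters and rcq=4097 scq=2048 otherwise.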