xsigo: Resize uVNIC/PVI CQ size
author     Pradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
           Tue, 1 Nov 2016 19:36:41 +0000 (19:36 +0000)
committer  Chuck Anderson <chuck.anderson@oracle.com>
           Thu, 3 Nov 2016 17:37:44 +0000 (10:37 -0700)
Orabug: 24765034

uVNIC/PVI should avoid the CQ overflow condition.

Resize CQs to 16k to handle multiple connections
being flushed simultaneously per path.

Increase the Send Queue and Receive Queue sizes to 2k
for better performance.

Add counters to print the CQ sizes.
Add stats to count RC completions.
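
For illustration only (not part of the patch): a minimal standalone sketch of how
the new CQ sizes are chosen in xve_transport_dev_init(). The xve_pick_cq_sizes()
helper and the main() wrapper are hypothetical; the is_titan/xve_use_hugecq names
mirror the driver code in xve_verbs.c, and the legacy-path math assumes the
send/receive queue sizes default to the new ring-size constants.

    #include <stdio.h>

    #define XVE_RX_RING_SIZE 2048   /* was 256 before this patch */
    #define XVE_TX_RING_SIZE 2048   /* was 128 before this patch */

    static int xve_use_hugecq = 16384;   /* module parameter added by the patch */

    /* Hypothetical helper mirroring the sizing logic in xve_transport_dev_init() */
    static void xve_pick_cq_sizes(int is_titan, int *rcq, int *scq)
    {
            if (is_titan && xve_use_hugecq) {
                    /* Titan (uVNIC/PVI) ports: one large CQ absorbs many
                     * connections being flushed at once on a path. */
                    *rcq = *scq = xve_use_hugecq;
            } else {
                    /* Legacy sizing: receive CQ covers both rings plus one
                     * extra entry for rx_drain_qp; send CQ matches the
                     * send ring. */
                    *rcq = XVE_TX_RING_SIZE + XVE_RX_RING_SIZE + 1;
                    *scq = XVE_TX_RING_SIZE;
            }
    }

    int main(void)
    {
            int rcq, scq;

            xve_pick_cq_sizes(1, &rcq, &scq);
            printf("titan:  rcq=%d scq=%d\n", rcq, scq);

            xve_pick_cq_sizes(0, &rcq, &scq);
            printf("legacy: rcq=%d scq=%d\n", rcq, scq);
            return 0;
    }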

Reported-by: scarlett chen <scarlett.chen@oracle.com>
Signed-off-by: Pradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
Reviewed-by: Aravind Kini <aravind.kini@oracle.com>
Reviewed-by: viswa krishnamurthy <viswa.krishnamurthy@oracle.com>
Reviewed-by: Manish Kumar Singh <mk.singh@oracle.com>
Reviewed-by: UmaShankar Tumari Mahabalagiri <umashankar.mahabalagiri@oracle.com>
drivers/infiniband/ulp/xsigo/xve/xve.h
drivers/infiniband/ulp/xsigo/xve/xve_cm.c
drivers/infiniband/ulp/xsigo/xve/xve_main.c
drivers/infiniband/ulp/xsigo/xve/xve_stats.c
drivers/infiniband/ulp/xsigo/xve/xve_verbs.c

index 1be1cd7006412407564ed3d5eeea7394b1f4aa60..038be751dd031a9a8b30faa3cfdc03eb10098261 100644 (file)
@@ -159,8 +159,8 @@ enum {
        XVE_CM_BUF_SIZE = XVE_CM_MTU + VLAN_ETH_HLEN,
        XVE_CM_HEAD_SIZE = XVE_CM_BUF_SIZE % PAGE_SIZE,
        XVE_CM_RX_SG = ALIGN(XVE_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
-       XVE_RX_RING_SIZE = 256,
-       XVE_TX_RING_SIZE = 128,
+       XVE_RX_RING_SIZE = 2048,
+       XVE_TX_RING_SIZE = 2048,
        XVE_MAX_QUEUE_SIZE = 8192,
        XVE_MIN_QUEUE_SIZE = 2,
        XVE_CM_MAX_CONN_QP = 4096,
@@ -292,6 +292,8 @@ enum {
 
        XVE_TX_UD_COUNTER,
        XVE_TX_RC_COUNTER,
+       XVE_RC_RXCOMPL_COUNTER,
+       XVE_RC_TXCOMPL_COUNTER,
        XVE_TX_MCAST_PKT,
        XVE_TX_BCAST_PKT,
        XVE_TX_MCAST_ARP_QUERY,
@@ -322,6 +324,7 @@ enum {
        XVE_HBEAT_COUNTER,
        XVE_LINK_STATUS_COUNTER,
        XVE_RX_NOGRH,
+       XVE_DUP_VID_COUNTER,
 
        XVE_MAX_COUNTERS
 };
@@ -750,6 +753,8 @@ struct xve_dev_priv {
        /* TX and RX Ring attributes */
        int xve_recvq_size;
        int xve_sendq_size;
+       int xve_rcq_size;
+       int xve_scq_size;
        int xve_max_send_cqe;
        struct xve_rx_buf *rx_ring;
        struct xve_tx_buf *tx_ring;
index b6a6d1c0e328d7664cca4904a5df23ed0181a50e..d3853d039e006875f22f83c9e44817ca02e656d4 100644 (file)
@@ -620,6 +620,7 @@ copied:
                        skb->data[10], skb->data[11],
                        skb->protocol);
        update_cm_rx_rate(p, skb->len);
+       priv->counters[XVE_RC_RXCOMPL_COUNTER]++;
        xve_send_skb(priv, skb);
 repost:
        if (unlikely(xve_cm_post_receive_srq(dev, wr_id))) {
@@ -745,6 +746,7 @@ void xve_cm_handle_tx_wc(struct net_device *dev,
 
        netif_tx_lock(dev);
        ++tx->tx_tail;
+       priv->counters[XVE_RC_TXCOMPL_COUNTER]++;
        if (unlikely(--priv->tx_outstanding == priv->xve_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(XVE_FLAG_ADMIN_UP, &priv->flags)) {
index 53f93f8718ea34354ae6e89a2abd05b43a35c5eb..dd8dfc48c125798d2828cb1461fb265127e93080 100644 (file)
@@ -2148,6 +2148,7 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
                ret = -EEXIST;
                update_state = 1;
                priv->xsmp_hndl = xsmp_hndl;
+               priv->counters[XVE_DUP_VID_COUNTER]++;
                goto send_ack;
        }
 
index 77f833576747f83b7f4952f1620cfbc2da4b63f2..c3fcb0c951b4402bc12ba62938befbe2243a2c96 100755 (executable)
@@ -126,6 +126,8 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
        "mcast detach count:\t\t",
        "tx ud count:\t\t\t",
        "tx rc count:\t\t\t",
+       "rc tx compl count:\t\t\t",
+       "rc rx compl count:\t\t\t",
        "tx mcast count:\t\t\t",
        "tx broadcast count:\t\t\t",
        "tx arp count:\t\t\t",
@@ -152,6 +154,7 @@ static char *counter_name[XVE_MAX_COUNTERS] = {
        "Heartbeat Count(0x8919):\t\t",
        "Link State message count:\t",
        "RX frames without GRH\t\t",
+       "Duplicate xve install count:\t"
 };
 
 static char *misc_counter_name[XVE_MISC_MAX_COUNTERS] = {
@@ -470,11 +473,13 @@ static int xve_proc_read_device(struct seq_file *m, void *data)
 
        seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
        seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
-       seq_printf(m, "Completion Queue size: \t\t%d\n", vp->xve_max_send_cqe);
+       seq_printf(m, "Receive CQ size: \t\t%d\n", vp->xve_rcq_size);
+       seq_printf(m, "Transmit CQ size: \t\t%d\n", vp->xve_scq_size);
 
        if (vp->cm_supported) {
                seq_printf(m, "Num of cm frags: \t\t%d\n", vp->cm.num_frags);
                seq_printf(m, "CM mtu  \t\t\t%d\n", vp->cm.max_cm_mtu);
+               seq_printf(m, "CM SRQ \t\t\t%s\n", (vp->cm.srq) ? "yes" : "no");
        }
 
        seq_puts(m, "\n");
index 8851c0a037aa737bedbe67e67ac80a222b71c406..4ae7cb1824f152c45983c428ed545bd4319d70b7 100644 (file)
@@ -36,6 +36,9 @@
 static int xve_max_inline_data = 128;
 module_param(xve_max_inline_data, int, 0644);
 
+static int xve_use_hugecq = 16384;
+module_param(xve_use_hugecq, int, 0644);
+
 int xve_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid,
                     int set_qkey)
 {
@@ -151,7 +154,7 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                .qp_type = IB_QPT_UD
        };
        struct ethtool_coalesce *coal;
-       int ret, size, max_sge = MAX_SKB_FRAGS + 1;
+       int ret, size = 0, max_sge = MAX_SKB_FRAGS + 1;
        int i;
 
        priv->pd = ib_alloc_pd(priv->ca);
@@ -167,7 +170,6 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                goto out_free_pd;
        }
 
-       size = priv->xve_recvq_size + 1;
        ret = xve_cm_dev_init(dev);
        if (ret != 0) {
                pr_err("%s Failed for %s [ret %d ]\n", __func__,
@@ -175,24 +177,32 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                goto out_free_mr;
        }
 
-       size += priv->xve_sendq_size;
-       size = priv->xve_recvq_size + 1;        /* 1 extra for rx_drain_qp */
+       /* Bug 24673784 */
+       if (priv->is_titan && xve_use_hugecq) {
+               priv->xve_rcq_size = priv->xve_scq_size =
+                               xve_use_hugecq;
+       } else {
+               size = priv->xve_sendq_size;
+               size += priv->xve_recvq_size + 1; /* 1 extra for rx_drain_qp */
+               priv->xve_rcq_size = size;
+               priv->xve_scq_size = priv->xve_sendq_size;
+       }
 
        /* Create Receive CompletionQueue */
-       priv->recv_cq =
-           ib_create_cq(priv->ca, xve_ib_completion, NULL, dev, size, 0);
+       priv->recv_cq = ib_create_cq(priv->ca, xve_ib_completion, NULL,
+                                    dev, priv->xve_rcq_size, 0);
        if (IS_ERR(priv->recv_cq)) {
-               pr_warn("%s: failed to create receive CQ for %s\n",
-                       ca->name, priv->xve_name);
+               pr_warn("%s: failed to create receive CQ for %s size%d\n",
+                       ca->name, priv->xve_name, priv->xve_rcq_size);
                goto out_free_mr;
        }
 
        /* Create Send CompletionQueue */
        priv->send_cq = ib_create_cq(priv->ca, xve_send_comp_handler, NULL,
-                                    dev, priv->xve_sendq_size, 0);
+                                    dev, priv->xve_scq_size, 0);
        if (IS_ERR(priv->send_cq)) {
-               pr_warn("%s: failed to create send CQ for %s\n",
-                       ca->name, priv->xve_name);
+               pr_warn("%s: failed to create send CQ for %s size%d\n",
+                       ca->name, priv->xve_name, priv->xve_scq_size);
                goto out_free_recv_cq;
        }