xsigo: Implementing Jumbo MTU support
author	Pradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
Tue, 1 Nov 2016 19:27:06 +0000 (19:27 +0000)
committer	Chuck Anderson <chuck.anderson@oracle.com>
Thu, 3 Nov 2016 17:36:46 +0000 (10:36 -0700)
Orabug: 24928804

With Titan and Saturn supporting jumbo InfiniBand frames, a uVNIC
can have an MTU greater than 4K and up to 10K.

Allocate multiple pages for receive descriptors; the code changes
handle mapping and unmapping of the multiple pages.
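
In outline, the receive path now maps one page per scatter/gather entry
beyond the header buffer instead of a single extra page. A simplified
sketch of the allocation loop (mirroring the xve_alloc_rx_skb() change
in the diff below; xve_ud_dma_unmap_rx() unmaps with the mirror-image loop):

	for (i = 1; i < xve_ud_rx_sg(priv); i++) {
		struct page *page = xve_alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		/* frag i-1 backs SG entry i; entry 0 is the linear header buffer */
		skb_fill_page_desc(skb, i - 1, page, 0, PAGE_SIZE);
		mapping[i] = ib_dma_map_page(priv->ca,
					     skb_shinfo(skb)->frags[i - 1].page.p,
					     0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i])))
			goto partial_error;
	}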

Care is taken to enable Jumbo MTU only for Titan cards and only
in EoIB mode.

If Jumbo MTU is requested on non-Titan cards, the uVNIC driver NACKs
the install and OFOS displays a failure message for the install.
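
Roughly, the install handler enforces this as follows (a condensed
sketch of the check added to xve_xsmp_install() in the diff below):

	if (be16_to_cpu(xmsgp->vn_mtu) > XVE_UD_MTU(4096) &&
	    (xmsgp->vnet_mode & XVE_VNET_MODE_UD)) {
		if (is_titan)
			is_jumbo = 1;
		else {
			/* non-Titan card: NACK the install */
			ecode = XVE_INVALID_OPERATION;
			goto dup_error;
		}
	}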

Added stats to display Jumbo status and removed the legacy EoIB
heartbeat code.

Reported-by: Pradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
Signed-off-by: Pradeep Gopanapalli <pradeep.gopanapalli@oracle.com>
Reviewed-by: sajid zia <szia@oracle.com>
drivers/infiniband/ulp/xsigo/xscore/Makefile
drivers/infiniband/ulp/xsigo/xsvhba/Makefile
drivers/infiniband/ulp/xsigo/xsvnic/Makefile
drivers/infiniband/ulp/xsigo/xve/Makefile
drivers/infiniband/ulp/xsigo/xve/xve.h
drivers/infiniband/ulp/xsigo/xve/xve_ib.c
drivers/infiniband/ulp/xsigo/xve/xve_main.c
drivers/infiniband/ulp/xsigo/xve/xve_multicast.c
drivers/infiniband/ulp/xsigo/xve/xve_stats.c
drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
drivers/infiniband/ulp/xsigo/xve/xve_xsmp_msgs.h

diff --git a/drivers/infiniband/ulp/xsigo/xscore/Makefile b/drivers/infiniband/ulp/xsigo/xscore/Makefile
index c263dd50cab7e33e2f9fd0f79c22f666411d8bd0..59f4757d18edca5c648274b0a7dfa2c42cd26660 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XSCORE) := xscore.o
 xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
            xscore_stats.o xscore_uadm.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8022\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xsvhba/Makefile b/drivers/infiniband/ulp/xsigo/xsvhba/Makefile
index 33d74f7958c249a61412191e5048111526c209a8..7900b401d044f56f7323710da9c9880aeca5e91b 100644 (file)
@@ -3,7 +3,7 @@ xsvhba-y := vhba_main.o vhba_xsmp.o vhba_create.o vhba_init.o vhba_delete.o \
            vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o        \
            vhba_scsi_intf.o vhba_align.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8022\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xsvnic/Makefile b/drivers/infiniband/ulp/xsigo/xsvnic/Makefile
index 963d9f84c526b1da918f987e84b4c795be91fffe..f9e33c532184591d1ea32bd8f68c7595549ae738 100644 (file)
@@ -1,7 +1,7 @@
 obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
 xsvnic-y := xsvnic_main.o xsvnic_stats.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8022\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xve/Makefile b/drivers/infiniband/ulp/xsigo/xve/Makefile
index 764cf0db49f20f6bafd63997942243a97d0bcd6d..50b46acbcb5c7f7f58f36078f7d49ae75db113f2 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_INFINIBAND_XVE) := xve.o
 xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
         xve_ethtool.o xve_cm.o xve_stats.o
 
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8022\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
 ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
 ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
 ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve.h b/drivers/infiniband/ulp/xsigo/xve/xve.h
index e10394ccab03336bf5ddcc22d3d2b8d192821989..a31f5219003bacd26d7d4334b516fabb86412d89 100644 (file)
@@ -84,6 +84,7 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
 #include <rdma/ib_cache.h>
+#include <rdma/sif_verbs.h>
 
 #include "xscore.h"
 #include "hash.h"
@@ -153,7 +154,7 @@ enum xve_flush_level {
 enum {
        XVE_UD_HEAD_SIZE = IB_GRH_BYTES + VLAN_ETH_HLEN + XVE_EOIB_LEN,
        XVE_UD_RX_OVN_SG = 2,   /* max buffer needed for 4K mtu */
-       XVE_UD_RX_EDR_SG = 3,   /* max buffer needed for 10K mtu */
+       XVE_UD_RX_EDR_SG = 4,   /* max buffer needed for 10K mtu */
        XVE_CM_MTU = 0x10000 - 0x20,    /* padding to align header to 16 */
        XVE_CM_BUF_SIZE = XVE_CM_MTU + VLAN_ETH_HLEN,
        XVE_CM_HEAD_SIZE = XVE_CM_BUF_SIZE % PAGE_SIZE,
@@ -552,26 +553,6 @@ enum {
        XVE_CM_ESTD_TX
 };
 
-/* Extension bits in the qp create mask to ib_create_qp
- */
-enum xve_qp_create_flags {
-       /* Indicate that this is an Ethernet over IB QP */
-       IB_QP_CREATE_EOIB            = 1 << 4,
-       /* Enable receive side scaling */
-       IB_QP_CREATE_RSS             = 1 << 5,
-       /* Enable header/data split for offloading */
-       IB_QP_CREATE_HDR_SPLIT       = 1 << 6,
-       /* Enable receive side dynamic mtu */
-       IB_QP_CREATE_RCV_DYNAMIC_MTU = 1 << 7,
-       /* Enable a special EPSA proxy */
-       IB_QP_CREATE_PROXY           = 1 << 8,
-       /* No csum for qp, wqe.wr.csum = qp.magic */
-       IB_QP_NO_CSUM                = 1 << 9,
-       /* Enable receive side dynamic mtu */
-       IB_QP_CREATE_SND_DYNAMIC_MTU = 1 << 10,
-};
-
-/* CM Statistics */
 struct xve_cm_stats {
        unsigned long tx_jiffies;
        unsigned long rx_jiffies;
@@ -795,6 +776,7 @@ struct xve_dev_priv {
        u8 vnet_mode;
        u8 vnic_type;
        u8 is_eoib;
+       u8 is_jumbo;
        char xve_name[XVE_MAX_NAME_SIZE];
        struct xve_gw_info gw;
 
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_ib.c b/drivers/infiniband/ulp/xsigo/xve/xve_ib.c
index 4b553ea689f65f8744a46c91483d38952959e466..73c71114ab5c082bfb8fc10911b7b5badf5db380 100644 (file)
@@ -71,12 +71,16 @@ void xve_free_ah(struct kref *kref)
 static void xve_ud_dma_unmap_rx(struct xve_dev_priv *priv,
                                u64 mapping[XVE_UD_RX_EDR_SG])
 {
+       int i = 0;
+
        if (xve_ud_need_sg(priv->admin_mtu)) {
                ib_dma_unmap_single(priv->ca, mapping[0], XVE_UD_HEAD_SIZE,
                                    DMA_FROM_DEVICE);
-               ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
-               xve_counters[XVE_NUM_PAGES_ALLOCED]--;
+               for (i = 1; i < xve_ud_rx_sg(priv); i++) {
+                       ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE,
+                                       DMA_FROM_DEVICE);
+                       xve_counters[XVE_NUM_PAGES_ALLOCED]--;
+               }
        } else {
                ib_dma_unmap_single(priv->ca, mapping[0],
                                    XVE_UD_BUF_SIZE(priv->max_ib_mtu),
@@ -89,34 +93,47 @@ static void xve_ud_skb_put_frags(struct xve_dev_priv *priv,
                struct sk_buff *skb,
                unsigned int length)
 {
-       if (xve_ud_need_sg(priv->admin_mtu)) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
-               unsigned int size;
-               /*
-                * There is only two buffers needed for max_payload = 4K,
-                * first buf size is XVE_UD_HEAD_SIZE
-                */
-               skb->tail += XVE_UD_HEAD_SIZE;
-               skb->len  += length;
+       int i, num_frags;
+       unsigned int size, hdr_space = XVE_UD_HEAD_SIZE;
 
-               size = length - XVE_UD_HEAD_SIZE;
+       /* put header into skb */
+       if (xve_ud_need_sg(priv->admin_mtu))
+               size = min(length, hdr_space);
+       else
+               size = length;
 
-               skb_frag_size_set(frag, size);
-               skb->data_len += size;
-               skb->truesize += PAGE_SIZE;
-       } else
-               skb_put(skb, length);
+               skb->tail += size;
+       skb->len += size;
+       length -= size;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       for (i = 0; i < num_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               if (length == 0) {
+                       __free_page(skb_shinfo(skb)->frags[i].page.p);
+                       --skb_shinfo(skb)->nr_frags;
+               } else {
+                       size = min_t(unsigned, length, PAGE_SIZE);
+
+                       frag->size = size;
+                       skb->data_len += size;
+                       skb->truesize += size;
+                       skb->len += size;
+                       length -= size;
+               }
+       }
 }
 
 static int xve_ib_post_receive(struct net_device *dev, int id)
 {
        struct xve_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
-       int ret;
+       int ret, i;
 
        priv->rx_wr.wr_id = id | XVE_OP_RECV;
-       priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
-       priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
+       for (i = 0; i < xve_ud_rx_sg(priv); i++)
+               priv->rx_sge[i].addr = priv->rx_ring[id].mapping[i];
 
        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
@@ -135,7 +152,7 @@ static struct sk_buff *xve_alloc_rx_skb(struct net_device *dev, int id)
        struct sk_buff *skb;
        int buf_size, align;
        u64 *mapping;
-       int tailroom;
+       int tailroom, i;
 
        if (xve_ud_need_sg(priv->admin_mtu)) {
                /* reserve some tailroom for IP/TCP headers */
@@ -165,17 +182,20 @@ static struct sk_buff *xve_alloc_rx_skb(struct net_device *dev, int id)
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;
 
-       if (xve_ud_need_sg(priv->admin_mtu)) {
+       for (i = 1; xve_ud_need_sg(priv->admin_mtu) &&
+                       i < xve_ud_rx_sg(priv); i++) {
                struct page *page = xve_alloc_page(GFP_ATOMIC);
 
                if (!page)
                        goto partial_error;
-               skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
-               mapping[1] =
-                   ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page.p,
-                                   0, PAGE_SIZE, DMA_FROM_DEVICE);
-               if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+               skb_fill_page_desc(skb, i-1, page, 0, PAGE_SIZE);
+               mapping[i] =
+                       ib_dma_map_page(priv->ca,
+                                       skb_shinfo(skb)->frags[i-1].page.p,
+                                       0, PAGE_SIZE, DMA_FROM_DEVICE);
+               if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i])))
                        goto partial_error;
+
        }
 
        priv->rx_ring[id].skb = skb;
@@ -373,6 +393,7 @@ xve_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                goto repost;
        }
 
+
        vlan = xg_vlan_get_rxtag(skb);
        if (wc->wc_flags & IB_WC_GRH) {
                xve_fwt_insert(priv, NULL, &grhhdr->source_gid, wc->src_qp,
@@ -698,17 +719,20 @@ static inline int post_send(struct xve_dev_priv *priv,
        skb_frag_t *frags = skb_shinfo(skb)->frags;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        u64 *mapping = tx_req->mapping;
+       int total_size = 0;
 
        if (skb_headlen(skb)) {
                priv->tx_sge[0].addr = mapping[0];
                priv->tx_sge[0].length = skb_headlen(skb);
                off = 1;
+               total_size += skb_headlen(skb);
        } else
                off = 0;
 
        for (i = 0; i < nr_frags; ++i) {
                priv->tx_sge[i + off].addr = mapping[i + off];
                priv->tx_sge[i + off].length = frags[i].size;
+               total_size += frags[i].size;
        }
        wr->num_sge = nr_frags + off;
        wr->wr_id = wr_id;
@@ -728,6 +752,10 @@ static inline int post_send(struct xve_dev_priv *priv,
                wr->opcode = IB_WR_SEND;
        }
 
+       xve_debug(DEBUG_TXDATA_INFO, priv, "wr_id %d Frags %d size %d\n",
+                       wr_id, wr->num_sge, total_size);
+       dumppkt(skb->data + sizeof(struct xve_eoib_hdr), total_size,
+                       "Post Send Dump");
        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
 /* Always called with priv->lock held
@@ -759,7 +787,15 @@ int xve_send(struct net_device *dev, struct sk_buff *skb,
                        return ret;
                }
        } else {
-               if (unlikely(skb->len > priv->mcast_mtu + VLAN_ETH_HLEN)) {
+               int max_packet_len;
+
+               if (priv->is_jumbo)
+                       max_packet_len = priv->admin_mtu + VLAN_ETH_HLEN
+                               + sizeof(struct xve_eoib_hdr);
+               else
+                       max_packet_len = priv->mcast_mtu + VLAN_ETH_HLEN;
+
+               if (unlikely(skb->len > max_packet_len)) {
                        xve_warn(priv, "%s packet len %d",  __func__, skb->len);
                        xve_warn(priv, "(> %d) too long to", priv->mcast_mtu);
                        xve_warn(priv, "send,dropping %ld packets %s\n",
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_main.c b/drivers/infiniband/ulp/xsigo/xve/xve_main.c
index 834892da85c650bd123daf4dcca053f173375d69..45b1e2d9b3fad980c9c767aa2b878cec04f672e8 100644 (file)
@@ -132,6 +132,7 @@ MODULE_PARM_DESC(xve_tca_qkey, "tca qkey");
 unsigned int xve_ud_mode;
 module_param(xve_ud_mode, uint, 0444);
 MODULE_PARM_DESC(xve_ud_mode, "Always use UD mode irrespective of xsmp.vnet_mode value");
+
 unsigned int xve_eoib_mode = 1;
 module_param(xve_eoib_mode, uint, 0444);
 MODULE_PARM_DESC(xve_eoib_mode, "Always use UD mode irrespective of xsmp.vnet_mode value");
@@ -261,11 +262,12 @@ int xve_modify_mtu(struct net_device *netdev, int new_mtu)
                return 0;
        }
 
-       if (new_mtu > XVE_UD_MTU(priv->max_ib_mtu))
+       if (!priv->is_jumbo && (new_mtu > XVE_UD_MTU(priv->max_ib_mtu)))
                return -EINVAL;
 
-       priv->admin_mtu = new_mtu;
-       netdev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+       priv->admin_mtu = netdev->mtu = new_mtu;
+       if (!priv->is_jumbo)
+               netdev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
        xve_queue_work(priv, XVE_WQ_START_FLUSHLIGHT);
        (void)xve_xsmp_handle_oper_req(priv->xsmp_hndl, priv->resource_id);
 
@@ -841,41 +843,8 @@ int xve_add_eoib_header(struct xve_dev_priv *priv, struct sk_buff *skb)
        eoibp = (struct xve_eoib_hdr *) skb_push(skb, len);
 
        skb_set_mac_header(skb, len);
-       if (!xve_enable_offload) {
-               eoibp->magic = cpu_to_be16(XVE_EOIB_MAGIC);
-               eoibp->tss_mask_sz = 0;
-               return 0;
-       }
-       /* encap_data = (VNIC_EOIB_HDR_VER << 4) | (VNIC_EOIB_HDR_SIG << 6)
-               From net/ethernet/mellanox/mlx4_vnic/vnic_data_tx.c */
-       eoibp->encap_data = 0x3 << 6;
-       eoibp->seg_off = eoibp->seg_id = 0;
-#define VNIC_EOIB_HDR_UDP_CHK_OK        0x2
-#define VNIC_EOIB_HDR_TCP_CHK_OK        0x1
-#define VNIC_EOIB_HDR_IP_CHK_OK         0x1
-
-#define VNIC_EOIB_HDR_SET_IP_CHK_OK(eoib_hdr)   (eoib_hdr->encap_data = \
-               (eoib_hdr->encap_data & 0xFC) | VNIC_EOIB_HDR_IP_CHK_OK)
-#define VNIC_EOIB_HDR_SET_TCP_CHK_OK(eoib_hdr)  (eoib_hdr->encap_data = \
-               (eoib_hdr->encap_data & 0xF3) | (VNIC_EOIB_HDR_TCP_CHK_OK << 2))
-#define VNIC_EOIB_HDR_SET_UDP_CHK_OK(eoib_hdr)  (eoib_hdr->encap_data = \
-               (eoib_hdr->encap_data & 0xF3) | (VNIC_EOIB_HDR_UDP_CHK_OK << 2))
-
-       switch (ntohs(skb->protocol)) {
-       case ETH_P_IP: {
-               struct iphdr *ip_h = ip_hdr(skb);
-
-               VNIC_EOIB_HDR_SET_IP_CHK_OK(eoibp);
-               if (ip_h->protocol == IPPROTO_TCP)
-                       VNIC_EOIB_HDR_SET_TCP_CHK_OK(eoibp);
-               else if (ip_h->protocol == IPPROTO_UDP)
-                       VNIC_EOIB_HDR_SET_UDP_CHK_OK(eoibp);
-               break;
-       }
-
-       case ETH_P_IPV6:
-               break;
-       }
+       eoibp->magic = cpu_to_be16(XVE_EOIB_MAGIC);
+       eoibp->tss_mask_sz = 0;
        return 0;
 }
 
@@ -1799,7 +1768,8 @@ int xve_set_dev_features(struct xve_dev_priv *priv, struct ib_device *hca)
                strcpy(priv->mode, "datagram(UD)");
 
                /* MTU will be reset when mcast join happens */
-               if (priv->netdev->mtu > XVE_UD_MTU(priv->max_ib_mtu))
+               if (!priv->is_jumbo &&
+                       (priv->netdev->mtu > XVE_UD_MTU(priv->max_ib_mtu)))
                        priv->netdev->mtu = XVE_UD_MTU(priv->max_ib_mtu);
                priv->lro_mode = 0;
        }
@@ -1941,7 +1911,6 @@ static int xve_check_for_hca(xsmp_cookie_t xsmp_hndl, u8 *is_titan)
        if (!((strncmp(hca->name, "mlx4", 4) != 0) ||
                        (strncmp(hca->name, "sif0", 4) != 0)))
                return -EEXIST;
-
        return 0;
 }
 
@@ -2147,7 +2116,7 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
        __be16 pkey_be;
        __be32 net_id_be;
        u8 ecode = 0;
-       u8 is_titan = 0;
+       u8 is_titan = 0, is_jumbo = 0;
 
        if (xve_check_for_hca(xsmp_hndl, &is_titan) != 0) {
                pr_info("Warning !!!!! Unsupported HCA card for xve ");
@@ -2157,6 +2126,20 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
                goto dup_error;
        }
 
+       if ((be16_to_cpu(xmsgp->vn_mtu) > XVE_UD_MTU(4096))
+                       && (xmsgp->vnet_mode & XVE_VNET_MODE_UD)) {
+               if (is_titan)
+                       is_jumbo = 1;
+               else {
+                       pr_info("Warning !!!!! Jumbo is supported on Titan Cards Only");
+                       pr_info("MTU%d %s\n", be16_to_cpu(xmsgp->vn_mtu),
+                               xmsgp->xve_name);
+                       ret = -EEXIST;
+                       ecode = XVE_INVALID_OPERATION;
+                       goto dup_error;
+               }
+       }
+
        priv = xve_get_xve_by_vid(be64_to_cpu(xmsgp->resource_id));
        if (priv) {
                /*
@@ -2236,6 +2219,7 @@ static int xve_xsmp_install(xsmp_cookie_t xsmp_hndl, struct xve_xsmp_msg *xmsgp,
        priv->vnic_type = xmsgp->vnic_type;
        priv->is_eoib = xve_eoib_mode ? (xmsgp->eoib_enable) : 0;
        priv->is_titan = (is_titan) ? 1 : 0;
+       priv->is_jumbo = (is_jumbo) ? 1 : 0;
 
        /* Make Send and Recv Queue parmaters Per Vnic */
        if (!(priv->vnet_mode & XVE_VNET_MODE_UD)) {
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_multicast.c b/drivers/infiniband/ulp/xsigo/xve/xve_multicast.c
index a6b6d82594780c1e5f095e2c087e8d59b92f9211..a753a6e5f47e64b2f6cef9dbdfd231ff318be0f3 100644 (file)
@@ -584,7 +584,9 @@ void xve_mcast_join_task(struct work_struct *work)
                    ("XVE: %s xve %s dev mtu %d, admin_mtu %d, mcast_mtu %d\n",
                     __func__, priv->xve_name, priv->netdev->mtu,
                     priv->admin_mtu, priv->mcast_mtu);
-               xve_dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+               if (!priv->is_jumbo)
+                       xve_dev_set_mtu(dev,
+                               min(priv->mcast_mtu, priv->admin_mtu));
        }
 
        xve_dbg_mcast(priv, "successfully joined all multicast groups\n");
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_stats.c b/drivers/infiniband/ulp/xsigo/xve/xve_stats.c
index a85834b88844f442f5b78d9fb28139d9f4d82837..4a426b6a23af5793e12cf8e8d9677d93fe27247b 100755 (executable)
@@ -462,6 +462,9 @@ static int xve_proc_read_device(struct seq_file *m, void *data)
        seq_printf(m, "IB MAX MTU: \t\t\t%d\n", vp->max_ib_mtu);
        seq_printf(m, "SG UD Mode:\t\t\t%d\n", xve_ud_need_sg(vp->admin_mtu));
        seq_printf(m, "Max SG supported(HCA):\t\t%d\n", vp->dev_attr.max_sge);
+       seq_printf(m, "Eoib:\t\t\t\t%s\n", (vp->is_eoib) ? "yes" : "no");
+       seq_printf(m, "Jumbo:\t\t\t\t%s\n", (vp->is_jumbo) ? "yes" : "no");
+       seq_printf(m, "Titan:\t\t\t\t%s\n", (vp->is_titan) ? "yes" : "no");
 
        seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
        seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c b/drivers/infiniband/ulp/xsigo/xve/xve_verbs.c
index d22573a135be5172befb9ad19e7ac7fd2c778b78..8851c0a037aa737bedbe67e67ac80a222b71c406 100644 (file)
@@ -33,6 +33,9 @@
 #include "xve.h"
 #include "xve_compat.h"
 
+static int xve_max_inline_data = 128;
+module_param(xve_max_inline_data, int, 0644);
+
 int xve_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid,
                     int set_qkey)
 {
@@ -225,8 +228,9 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 
        if (priv->is_eoib && priv->is_titan) {
                init_attr.create_flags |= IB_QP_CREATE_EOIB;
-               xve_debug(DEBUG_QP_INFO, priv, "Setting eoIB mode%x\n",
-                               init_attr.create_flags);
+               init_attr.cap.max_inline_data = xve_max_inline_data;
+               xve_debug(DEBUG_QP_INFO, priv, "Setting eoIB mode%x data%x\n",
+                               init_attr.create_flags, xve_max_inline_data);
        }
 
        priv->qp = ib_create_qp(priv->pd, &init_attr);
@@ -243,10 +247,12 @@ int xve_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        priv->tx_wr.send_flags = IB_SEND_SIGNALED;
 
        priv->rx_sge[0].lkey = priv->mr->lkey;
-       if (xve_ud_need_sg(priv->admin_mtu)) {
+        if (xve_ud_need_sg(priv->admin_mtu)) {
                priv->rx_sge[0].length = XVE_UD_HEAD_SIZE;
-               priv->rx_sge[1].length = PAGE_SIZE;
-               priv->rx_sge[1].lkey = priv->mr->lkey;
+               for (i = 1; i < xve_ud_rx_sg(priv); i++) {
+                       priv->rx_sge[i].length = PAGE_SIZE;
+                       priv->rx_sge[i].lkey = priv->mr->lkey;
+               }
                priv->rx_wr.num_sge = xve_ud_rx_sg(priv);
        } else {
                priv->rx_sge[0].length = XVE_UD_BUF_SIZE(priv->max_ib_mtu);
diff --git a/drivers/infiniband/ulp/xsigo/xve/xve_xsmp_msgs.h b/drivers/infiniband/ulp/xsigo/xve/xve_xsmp_msgs.h
index c65b1b39e2e53aad4edec94864c4f24e854f5b96..93a81034e4da2e2e670bfdf74f358ca7013b049f 100644 (file)
@@ -132,7 +132,8 @@ struct xve_xsmp_msg {
 #define XVE_NACK_DUP_VID       2       /* duplicate VID */
 #define XVE_NACK_LIMIT_REACHED 3       /* Max number of XVEs reached */
 #define XVE_NACK_ALLOCATION_ERROR      4       /* Error during instantiation */
-#define XVE_NACK_CODE_MAX      5
+#define XVE_INVALID_OPERATION  6       /* Invalid Install message */
+#define XVE_NACK_CODE_MAX      7
 
 /* The common XVE XSMP header for all messages */
 struct xve_xsmp_header {