DEBUG_MISC_INFO = 0x00004000,
DEBUG_IBDEV_INFO = 0x00008000,
DEBUG_CM_INFO = 0x00010000,
- DEBUG_CTRL_INFO = 0x00020000
+ DEBUG_CTRL_INFO = 0x00020000,
+ DEBUG_QP_INFO = 0x00040000,
+ DEBUG_TX_INFO = 0x00080000,
+ DEBUG_RX_INFO = 0x00100000,
+ DEBUG_TXDATA_INFO = 0x00200000
};
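For context, a minimal sketch (not part of this patch) of how these DEBUG_*_INFO bits are typically consumed: a module-wide mask gates pr_info() output. The xve_debug_level name is an assumption here; only the xve_debug(DEBUG_TX_INFO, priv, ...) call shape appears in this patch.

static int xve_debug_level;	/* assumed module-wide debug mask */

#define xve_debug(level, priv, format, arg...)			\
	do {							\
		if (xve_debug_level & (level))			\
			pr_info("%s: " format,			\
				(priv)->xve_name, ## arg);	\
	} while (0)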
#define XVE_OP_RECV (1ul << 31)
XVE_CM_ESTD_TX
};
+/* Extension bits in the QP create mask passed to ib_create_qp() */
+enum xve_qp_create_flags {
+ /* Indicate that this is an Ethernet over IB QP */
+ IB_QP_CREATE_EOIB = 1 << 4,
+ /* Enable receive side scaling */
+ IB_QP_CREATE_RSS = 1 << 5,
+ /* Enable header/data split for offloading */
+ IB_QP_CREATE_HDR_SPLIT = 1 << 6,
+ /* Enable receive side dynamic mtu */
+ IB_QP_CREATE_RCV_DYNAMIC_MTU = 1 << 7,
+ /* Enable a special EPSA proxy */
+ IB_QP_CREATE_PROXY = 1 << 8,
+ /* No csum for qp, wqe.wr.csum = qp.magic */
+ IB_QP_NO_CSUM = 1 << 9,
+ /* Enable send side dynamic mtu */
+ IB_QP_CREATE_SND_DYNAMIC_MTU = 1 << 10,
+};
+
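A hedged usage sketch of where these bits would land: OR-ed into ib_qp_init_attr.create_flags before ib_create_qp(). The PD/CQ field names and queue capacities below are illustrative assumptions, not taken from this patch.

struct ib_qp_init_attr init_attr = {
	.send_cq     = priv->send_cq,	/* assumed field names */
	.recv_cq     = priv->recv_cq,
	.sq_sig_type = IB_SIGNAL_ALL_WR,
	.qp_type     = IB_QPT_UD,
	.cap = {
		.max_send_wr  = priv->xve_sendq_size,
		.max_recv_wr  = priv->xve_recvq_size,
		.max_send_sge = 1,
		.max_recv_sge = 1,
	},
};

if (priv->is_eoib)
	init_attr.create_flags |= IB_QP_CREATE_EOIB;

priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp))
	return PTR_ERR(priv->qp);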
/* CM Statistics */
struct xve_cm_stats {
unsigned long tx_jiffies;
__be16 bcast_mlid;
u16 local_lid;
u32 qkey;
+ u32 port_qkey;
+ u8 is_titan;
/* Device attributes */
struct ib_device_attr dev_attr;
u16 mp_flag;
u8 vnet_mode;
u8 vnic_type;
+ u8 is_eoib;
char xve_name[XVE_MAX_NAME_SIZE];
struct xve_gw_info gw;
pr_info("%s DumpPacket of %d\n", name, len);
for (i = 0; i < len; i++) {
- if ((i != 0) && (i % 8 == 0)) {
+ if ((i != 0) && (i % 16 == 0)) {
pr_info("%s\n", line);
memset(line, 0, sizeof(line));
cp = line;
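The hunk above shows only the loop head; for reference, a sketch of the complete helper with the new 16-bytes-per-line layout (the buffer size and the final flush are assumptions):

static void dumppkt(unsigned char *pkt, unsigned short len, char *name)
{
	char line[64], *cp = line;	/* 16 bytes * 3 chars fits in 64 */
	int i;

	pr_info("%s DumpPacket of %d\n", name, len);
	for (i = 0; i < len; i++) {
		if ((i != 0) && (i % 16 == 0)) {
			pr_info("%s\n", line);
			memset(line, 0, sizeof(line));
			cp = line;
		}
		cp += sprintf(cp, "%02x ", pkt[i]);
	}
	if (cp != line)
		pr_info("%s\n", line);	/* flush the final partial line */
}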
xve_ud_dma_unmap_rx(priv, mapping);
xve_ud_skb_put_frags(priv, skb, wc->byte_len);
grhhdr = (struct ib_packed_grh *)(skb->data);
- /* This will print packet when driver is in Debug Mode */
- dumppkt(skb->data, skb->len, "UD Packet Dump");
skb_pull(skb, IB_GRH_BYTES);
if (xve_is_edr(priv)) {
test_bit(XVE_FLAG_CSUM, &priv->flags))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Dump the packet when the driver is in debug mode */
+ dumppkt(skb->data, skb->len, "UD Packet Dump");
xve_test("%s RX UD pkt %02x %02x %02x %02x %02x %02x %02x %02x %02x",
__func__, skb->data[0], skb->data[1], skb->data[2],
skb->data[3], skb->data[4], skb->data[5], skb->data[6],
wr->num_sge = nr_frags + off;
wr->wr_id = wr_id;
wr->wr.ud.remote_qpn = qpn;
+ if (priv->is_eoib) {
+ wr->wr.ud.remote_qkey = priv->port_qkey;
+ xve_debug(DEBUG_TX_INFO, priv, "%s qkey to use %x\n",
+ __func__, wr->wr.ud.remote_qkey);
+ }
wr->wr.ud.ah = address;
if (head) {
wr->wr.ud.mss = skb_shinfo(skb)->gso_size;
}
skb_orphan(skb);
skb_dst_drop(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
+ else
+ priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+ xve_debug(DEBUG_TX_INFO, priv,
+ "%s sending packet, length=%d csum=%x address=%p qpn=0x%06x flags%x\n",
+ __func__, skb->len, skb->ip_summed,
+ address, qpn, priv->tx_wr.send_flags);
+
if (unlikely(post_send(priv, priv->tx_head & (priv->xve_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen))) {
xve_warn(priv, "%s post_send failed head%d tail%d out%d type%d\n",
module_param_named(ignore_hb_loss, xve_ignore_hbeat_loss, int, 0644);
MODULE_PARM_DESC(ignore_hb_loss, "Ignore heart beat loss on edr based vNICs with uplink");
-int xve_enable_offload;
+int xve_enable_offload = 1;
module_param_named(enable_offload, xve_enable_offload, int, 0444);
MODULE_PARM_DESC(enable_offload, "Enable stateless offload");
-
unsigned long xve_tca_subnet;
module_param(xve_tca_subnet, ulong, 0444);
MODULE_PARM_DESC(xve_tca_subnet, "tca subnet prefix");
unsigned int xve_ud_mode;
module_param(xve_ud_mode, uint, 0444);
MODULE_PARM_DESC(xve_ud_mode, "Always use UD mode irrespective of xsmp.vnet_mode value");
+unsigned int xve_eoib_mode = 1;
+module_param(xve_eoib_mode, uint, 0444);
+MODULE_PARM_DESC(xve_eoib_mode, "Enable Ethernet-over-IB (EoIB) mode");
static void xve_send_msg_to_xsigod(xsmp_cookie_t xsmp_hndl, void *data,
int len);
priv->netdev->hw_features =
NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_GRO;
- priv->lro_mode = 1;
-
- if (xve_enable_offload) {
- if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM)
+ pr_info("XVE: %s %s flags[%x]\n",
+ __func__, priv->xve_name, priv->hca_caps);
+ if (xve_enable_offload && (priv->is_eoib && priv->is_titan)) {
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ pr_info("XVE: %s Setting checksum offload %s[%x]\n",
+ __func__, priv->xve_name, priv->hca_caps);
+ set_bit(XVE_FLAG_CSUM, &priv->flags);
priv->netdev->hw_features |=
NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ }
- if (priv->hca_caps & IB_DEVICE_UD_TSO)
+ if (priv->hca_caps & IB_DEVICE_UD_TSO) {
+ pr_info("XVE: %s Setting TSO offload %s[%x]\n",
+ __func__, priv->xve_name, priv->hca_caps);
priv->netdev->hw_features |= NETIF_F_TSO;
+ }
}
priv->netdev->features |= priv->netdev->hw_features;
- if (priv->lro_mode && lro) {
- priv->netdev->features |= NETIF_F_LRO;
- xve_lro_setup(priv);
- } else
- priv->lro_mode = 0;
-
/* Reserve extra space for EoIB header */
priv->netdev->hard_header_len += sizeof(struct xve_eoib_hdr);
}
return 0;
}
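The hard_header_len bump above is sized by sizeof(struct xve_eoib_hdr), which is defined elsewhere in this driver. Purely as a hypothetical illustration of why a small fixed reservation suffices, an EoIB encapsulation prefix is typically a packed header along these lines:

/* Hypothetical layout for illustration only; the driver's real
 * definition governs the reservation above.
 */
struct xve_eoib_hdr {
	__be16 magic;	/* encapsulation signature/version */
	__be16 seg_off;	/* offload/segmentation bookkeeping */
} __packed;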
-static int xve_check_for_hca(xsmp_cookie_t xsmp_hndl)
+static int xve_check_for_hca(xsmp_cookie_t xsmp_hndl, u8 *is_titan)
{
struct ib_device *hca;
struct xsmp_session_info xsmp_info;
xcpm_get_xsmp_session_info(xsmp_hndl, &xsmp_info);
hca = xsmp_info.ib_device;
+ if (strncmp(hca->name, "sif", 3) == 0)
+ *is_titan = (u8)1;
if (!((strncmp(hca->name, "mlx4", 4) != 0) ||
(strncmp(hca->name, "sif0", 4) != 0)))
xmsgp->hca_data_qp = cpu_to_be32(priv->qp->qp_num);
xmsgp->hca_qkey = cpu_to_be32(priv->qkey);
xmsgp->hca_pkey = cpu_to_be16(priv->pkey);
- xmsgp->tca_subnet_prefix =
- cpu_to_be64(priv->gw.t_gid.global.subnet_prefix);
- xmsgp->tca_guid =
- cpu_to_be64(priv->gw.t_gid.global.interface_id);
- xmsgp->tca_ctrl_qp = cpu_to_be32(priv->gw.t_ctrl_qp);
- xmsgp->tca_data_qp = cpu_to_be32(priv->gw.t_data_qp);
- xmsgp->tca_pkey = cpu_to_be16(priv->gw.t_pkey);
- xmsgp->tca_qkey = cpu_to_be16(priv->gw.t_qkey);
+ if (!priv->is_eoib) {
+ xmsgp->tca_subnet_prefix =
+ cpu_to_be64(priv->gw.t_gid.global.subnet_prefix);
+ xmsgp->tca_guid =
+ cpu_to_be64(priv->gw.t_gid.global.interface_id);
+ xmsgp->tca_ctrl_qp = cpu_to_be32(priv->gw.t_ctrl_qp);
+ xmsgp->tca_data_qp = cpu_to_be32(priv->gw.t_data_qp);
+ xmsgp->tca_pkey = cpu_to_be16(priv->gw.t_pkey);
+ xmsgp->tca_qkey = cpu_to_be16(priv->gw.t_qkey);
+ }
}
pr_info("XVE: %s ACK back with admin mtu ", __func__);
pr_info("%d for %s", xmsgp->vn_mtu, priv->xve_name);
__be16 pkey_be;
__be32 net_id_be;
u8 ecode = 0;
+ u8 is_titan = 0;
- if (xve_check_for_hca(xsmp_hndl) != 0) {
+ if (xve_check_for_hca(xsmp_hndl, &is_titan) != 0) {
pr_info("Warning !!!!! Unsupported HCA card for xve ");
pr_info("interface - %s XSF feature is only ", xmsgp->xve_name);
pr_info("supported on Connect-X and PSIF HCA cards !!!!!!!");
(xmsgp->vnet_mode);
priv->net_id = be32_to_cpu(xmsgp->net_id);
priv->netdev->mtu = be16_to_cpu(xmsgp->vn_mtu);
+ pr_info("XVE: %s MTU %d - ", __func__, priv->netdev->mtu);
priv->resource_id = be64_to_cpu(xmsgp->resource_id);
priv->mp_flag = be16_to_cpu(xmsgp->mp_flag);
priv->install_flag = be32_to_cpu(xmsgp->install_flag);
/* For legacy PVI's XSMP will not have vnic_type field so
value is zero */
priv->vnic_type = xmsgp->vnic_type;
- /* Make Send and Recv Queue parmaters Per Vnic */
- priv->xve_sendq_size = xve_sendq_size;
- priv->xve_recvq_size = xve_recvq_size;
- priv->xve_max_send_cqe = xve_max_send_cqe;
+ priv->is_eoib = xve_eoib_mode ? (xmsgp->eoib_enable) : 0;
+ priv->is_titan = (is_titan) ? 1 : 0;
- if (priv->vnic_type == XSMP_XCM_UPLINK) {
- /* For G/W mode set higher values */
+ /* Make Send and Recv Queue parameters per vNIC */
+ if (!(priv->vnet_mode & XVE_VNET_MODE_UD)) {
+ priv->xve_sendq_size = xve_sendq_size;
+ priv->xve_recvq_size = xve_recvq_size;
+ priv->xve_max_send_cqe = xve_max_send_cqe;
+ } else {
+ /* For UD mode set higher values */
priv->xve_sendq_size = 8192;
priv->xve_recvq_size = 8192;
priv->xve_max_send_cqe = 512;
+ }
+
+ if (priv->vnic_type == XSMP_XCM_UPLINK) {
priv->gw.t_gid.global.subnet_prefix =
xve_tca_subnet ? cpu_to_be64(xve_tca_subnet) :
be64_to_cpu(xmsgp->tca_subnet_prefix);
be32_to_cpu(xmsgp->tca_data_qp);
priv->gw.t_pkey = xve_tca_pkey ? xve_tca_pkey :
be16_to_cpu(xmsgp->tca_pkey);
- /* FIXME: xmsgp->tca_qkey is u16.need to fix in osdn */
- priv->gw.t_qkey = xve_tca_qkey ? xve_tca_qkey :
- be16_to_cpu(xmsgp->tca_qkey);
xve_dbg_ctrl(priv,
"GW prefix:%llx guid:%llx, lid: %hu sl: %hu TDQP%x TCQP:%x\n",
priv->gw.t_gid.global.subnet_prefix,
if (priv->pkey == 0)
priv->pkey |= 0x8000;
/* Qkey For EDR vnic's*/
- priv->gw.t_qkey = xve_tca_qkey ? xve_tca_qkey :
- be16_to_cpu(xmsgp->tca_qkey);
+ if (priv->is_eoib) {
+ priv->gw.t_qkey = xve_tca_qkey ? xve_tca_qkey :
+ be32_to_cpu(xmsgp->global_qpkey);
+ priv->port_qkey = (port == 1 || priv->is_titan != 1) ?
+ priv->gw.t_qkey : priv->gw.t_qkey + 1;
+ } else {
+ priv->gw.t_qkey = xve_tca_qkey ? xve_tca_qkey :
+ be16_to_cpu(xmsgp->tca_qkey);
+ }
/* Always set chassis ADMIN up by default */
set_bit(XVE_CHASSIS_ADMIN_UP, &priv->state);
pr_info("XVE: %s adding vnic %s ",
__func__, priv->xve_name);
- pr_info("net_id %d vnet_mode %d type%d",
- priv->net_id, priv->vnet_mode, priv->vnic_type);
+ pr_info("net_id %d vnet_mode %d type%d eoib[%s]",
+ priv->net_id, priv->vnet_mode, priv->vnic_type,
+ priv->is_eoib ? "Yes" : "no");
pr_info("port %d net_id_be %d\n", port, net_id_be);
+ pr_info("MTU port%d active%d\n", priv->port_attr.max_mtu,
+ priv->port_attr.active_mtu);
memcpy(priv->bcast_mgid.raw, bcast_mgid, sizeof(union ib_gid));
if (xve_is_edr(priv)) {
{
struct xve_dev_priv *priv;
unsigned long flags;
-int ret;
+ int ret;
priv = xve_get_xve_by_vid(be64_to_cpu(xmsgp->resource_id));
if (!priv) {
}
pr_info("XVE VNIC_READY: vnic_type: %u, subnet_prefix: %llx\n",
priv->vnic_type, priv->gw.t_gid.global.subnet_prefix);
- pr_info("ctrl_qp: %u, data_qp: %u, pkey: %x, qkey: %x\n",
+ pr_info("TCA ctrl_qp: %u, data_qp: %u, pkey: %x, qkey: %x\n",
priv->gw.t_ctrl_qp, priv->gw.t_data_qp,
priv->gw.t_pkey, priv->gw.t_qkey);