xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
xscore_stats.o xscore_uadm.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8016\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8020\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o \
vhba_scsi_intf.o vhba_align.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8016\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8020\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
xsvnic-y := xsvnic_main.o xsvnic_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8016\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8020\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
xve_ethtool.o xve_cm.o xve_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8016\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8020\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
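
For context, a minimal standalone sketch (not part of the patch) of how a version string passed through ccflags-y, like XSIGO_LOCAL_VERSION in the Makefile hunks above, is consumed at compile time; the fallback define and the build command are illustrative assumptions:

/* version_demo.c - consuming a -D string define such as XSIGO_LOCAL_VERSION.
 * Hypothetical build: cc -DXSIGO_LOCAL_VERSION=\"6.0.r8020\" version_demo.c
 */
#include <stdio.h>

#ifndef XSIGO_LOCAL_VERSION
#define XSIGO_LOCAL_VERSION "unknown"	/* fallback when the flag is absent */
#endif

int main(void)
{
	/* In the driver this string would typically feed a MODULE_VERSION()
	 * or banner print; here it is simply printed. */
	printf("xsigo local version: %s\n", XSIGO_LOCAL_VERSION);
	return 0;
}
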
XVE_MAX_LRO_DESCRIPTORS = 8,
XVE_LRO_MAX_AGGR = 64,
MAX_SEND_CQE = 32,
+ SENDQ_LOW_WMARK = 32,
XVE_CM_COPYBREAK = 256,
};
if (!test_bit(XVE_DELETING, &priv->state)) {
pr_err("%s: cm recv error", priv->xve_name);
pr_err("(status=%d, wrid=%d", wc->status, wr_id);
- pr_err("vend_err 0x%x)\n", wc->vendor_err);
+ pr_err("vend_err %x)\n", wc->vendor_err);
}
INC_RX_DROP_STATS(priv, dev);
goto repost;
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) {
pr_err("%s: failed cm send event ", priv->xve_name);
- pr_err("(status=%d, wrid=%d vend_err 0x%x)\n",
+ pr_err("(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
xve_cm_destroy_tx_deferred(tx);
}
++priv->tx_head;
priv->send_hbeat_flag = 0;
- if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+ if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK))
poll_tx(priv);
return ret;
}
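
The enum hunk earlier defines SENDQ_LOW_WMARK with the same value as MAX_SEND_CQE (32), so the hunks above change which constant gates completion reclaim rather than the current threshold itself; presumably this decouples the watermark from the CQ depth so it can be tuned independently. A minimal userspace model of that behaviour, with tx_outstanding, SENDQ_LOW_WMARK and poll_tx() mirroring the driver names and the reclaim itself simulated:

/* sendq_wmark_demo.c - model of the watermark check: reclaim send
 * completions once outstanding work crosses SENDQ_LOW_WMARK.
 */
#include <stdio.h>

enum { MAX_SEND_CQE = 32, SENDQ_LOW_WMARK = 32 };

struct txq {
	unsigned int tx_head;		/* sends posted */
	unsigned int tx_tail;		/* sends reclaimed */
	unsigned int tx_outstanding;	/* posted minus reclaimed */
};

/* Stand-in for poll_tx(): pretend the send CQ completed everything. */
static void poll_tx(struct txq *q)
{
	q->tx_tail = q->tx_head;
	q->tx_outstanding = 0;
}

static void post_send(struct txq *q)
{
	++q->tx_head;
	++q->tx_outstanding;
	/* The patched condition: poll once outstanding > SENDQ_LOW_WMARK. */
	if (q->tx_outstanding > SENDQ_LOW_WMARK)
		poll_tx(q);
}

int main(void)
{
	struct txq q = { 0, 0, 0 };
	int i;

	for (i = 0; i < 100; i++)
		post_send(&q);
	printf("posted=%u reclaimed=%u outstanding=%u\n",
	       q.tx_head, q.tx_tail, q.tx_outstanding);
	return 0;
}
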
if (priv->send_hbeat_flag) {
unsigned long flags = 0;
- if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) {
+ if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK)) {
netif_tx_lock(priv->netdev);
spin_lock_irqsave(&priv->lock, flags);
poll_tx(priv);
if (priv->lro_mode && lro) {
priv->netdev->features |= NETIF_F_LRO;
xve_lro_setup(priv);
- } else {
+ } else
priv->lro_mode = 0;
- }
}
void
if (priv->lro_mode && lro) {
priv->netdev->features |= NETIF_F_LRO;
xve_lro_setup(priv);
- } else {
+ } else
priv->lro_mode = 0;
- }
/* Reserve extra space for EoIB header */
priv->netdev->hard_header_len += sizeof(struct xve_eoib_hdr);
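
The two hunks above keep NETIF_F_LRO and the xve_lro_setup() call only when both the per-device lro_mode and the lro knob (a module parameter in the driver) are set, clearing lro_mode otherwise, and then reserve headroom for the EoIB header. A minimal userspace model of that decision; the fake_xve type, the 4-byte header size and the plain-static stand-in for the module parameter are illustrative assumptions:

/* lro_toggle_demo.c - model of the LRO enable/disable decision above. */
#include <stdio.h>

#define NETIF_F_LRO	(1u << 0)	/* stand-in for the kernel feature bit */

struct fake_xve {
	unsigned int features;
	int lro_mode;
	unsigned int hard_header_len;
};

static int lro = 1;			/* models the 'lro' module parameter */

static void xve_lro_setup(struct fake_xve *priv)
{
	(void)priv;			/* placeholder for LRO descriptor setup */
}

static void setup_offloads(struct fake_xve *priv)
{
	if (priv->lro_mode && lro) {
		priv->features |= NETIF_F_LRO;
		xve_lro_setup(priv);
	} else
		priv->lro_mode = 0;

	/* Reserve extra space for the EoIB header; 4 bytes stands in for
	 * sizeof(struct xve_eoib_hdr). */
	priv->hard_header_len += 4;
}

int main(void)
{
	struct fake_xve priv = { 0, 1, 14 };

	setup_offloads(&priv);
	printf("features=%#x lro_mode=%d hard_header_len=%u\n",
	       priv.features, priv.lro_mode, priv.hard_header_len);
	return 0;
}
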
seq_printf(m, "SG UD Mode:\t\t\t%d\n", xve_ud_need_sg(vp->admin_mtu));
seq_printf(m, "Max SG supported(HCA):\t\t%d\n", vp->dev_attr.max_sge);
- seq_printf(m, "Receive Queue size: \t\t%d\n", xve_recvq_size);
- seq_printf(m, "Transmit Queue size: \t\t%d\n", xve_sendq_size);
+ seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
+ seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
if (vp->cm_supported) {
seq_printf(m, "Num of cm frags: \t\t%d\n", vp->cm.num_frags);
.qp_type = IB_QPT_UD
};
struct ethtool_coalesce *coal;
- int ret, size, max_sge;
+ int ret, size, max_sge = MAX_SKB_FRAGS + 1;
int i;
priv->pd = ib_alloc_pd(priv->ca);
goto out_free_send_cq;
}
- for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
+ for (i = 0; i < max_sge; ++i)
priv->tx_sge[i].lkey = priv->mr->lkey;
priv->tx_wr.opcode = IB_WR_SEND;
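
The last hunk initialises max_sge to MAX_SKB_FRAGS + 1 and uses it as the bound when assigning the MR lkey to the TX scatter/gather list, which presumably reflects one SGE for the linear part of an skb plus one per page fragment. A small standalone sketch of that sizing; the device-limit clamp, the fake_sge type and the lkey value are illustrative assumptions, not code from the patch:

/* tx_sge_demo.c - why the TX SGE array is sized MAX_SKB_FRAGS + 1. */
#include <stdio.h>
#include <stdint.h>

#define MAX_SKB_FRAGS	17		/* stand-in for the kernel constant */

struct fake_sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

int main(void)
{
	struct fake_sge tx_sge[MAX_SKB_FRAGS + 1];
	int max_sge = MAX_SKB_FRAGS + 1;	/* as initialised in the hunk */
	int dev_max_sge = 32;			/* hypothetical HCA limit */
	uint32_t lkey = 0x1234;			/* hypothetical MR lkey */
	int i;

	if (max_sge > dev_max_sge)		/* illustrative clamp only */
		max_sge = dev_max_sge;

	for (i = 0; i < max_sge; ++i)		/* mirrors the patched loop */
		tx_sge[i].lkey = lkey;

	printf("initialised %d of %zu SGEs\n",
	       max_sge, sizeof(tx_sge) / sizeof(tx_sge[0]));
	return 0;
}
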