xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
xscore_stats.o xscore_uadm.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8031\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o \
vhba_scsi_intf.o vhba_align.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8031\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
xsvnic-y := xsvnic_main.o xsvnic_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8031\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
xve_ethtool.o xve_cm.o xve_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8029\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8031\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
XVE_TX_DROP_OPER_DOWN_COUNT,
XVE_TX_SKB_ALLOC_ERROR_COUNTER,
XVE_TX_RING_FULL_COUNTER,
+ XVE_TX_WMARK_REACH_COUNTER,
XVE_TX_WAKE_UP_COUNTER,
XVE_TX_QUEUE_STOP_COUNTER,
XVE_RX_SKB_COUNTER,
XVE_TX_UD_COUNTER,
XVE_TX_RC_COUNTER,
XVE_TX_MCAST_PKT,
+ XVE_TX_BCAST_PKT,
XVE_TX_MCAST_ARP_QUERY,
XVE_TX_MCAST_NDP_QUERY,
XVE_TX_MCAST_ARP_VLAN_QUERY,
struct net_device_stats stats;
struct napi_struct napi;
struct xve_ethtool_st ethtool;
+ struct timer_list poll_timer;
u8 lro_mode;
struct xve_lro lro;
unsigned long flags;
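The new poll_timer field added above is wired up across three hunks further down: it is bound to the TX poll callback with setup_timer() during device setup, armed with mod_timer() from the send-completion handler, and stopped with del_timer_sync() on the teardown path. A condensed lifecycle sketch using the pre-4.15 timer API this patch targets (call sites as in the hunks below):

	/* device init: bind the timer to the TX poll callback */
	setup_timer(&priv->poll_timer, xve_ib_tx_timer_func,
		    (unsigned long)dev);

	/* send CQ event: defer completion reaping to the timer */
	mod_timer(&priv->poll_timer, jiffies);

	/* teardown: wait until any running callback has finished */
	del_timer_sync(&priv->poll_timer);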
void xve_fwt_entry_free(struct xve_dev_priv *priv,
struct xve_fwt_entry *fwt_entry);
-int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
+int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb,
+ u8 broadcast);
void xve_advert_mcast_join(struct xve_dev_priv *priv);
int xve_mcast_start_thread(struct net_device *dev);
int xve_mcast_stop_thread(struct net_device *dev, int flush);
}
}
-void xve_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+static void xve_ib_tx_timer_func(unsigned long ctx)
{
- struct xve_dev_priv *priv = netdev_priv((struct net_device *)dev_ptr);
+ struct net_device *dev = (struct net_device *)ctx;
+ struct xve_dev_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
+ netif_tx_lock(dev);
spin_lock_irqsave(&priv->lock, flags);
if (test_bit(XVE_OPER_UP, &priv->state) &&
- !test_bit(XVE_DELETING, &priv->state)) {
+ !test_bit(XVE_DELETING, &priv->state)) {
poll_tx(priv);
}
spin_unlock_irqrestore(&priv->lock, flags);
+ if (netif_queue_stopped(dev))
+ mod_timer(&priv->poll_timer, jiffies + 1);
+
+ netif_tx_unlock(dev);
+}
+
+void xve_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+{
+ struct xve_dev_priv *priv = netdev_priv((struct net_device *)dev_ptr);
+
+ mod_timer(&priv->poll_timer, jiffies);
}
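The hunk above moves TX completion polling out of the CQ event handler: the handler now only arms poll_timer, and the timer callback drains completions under netif_tx_lock(), re-arming itself one jiffy out for as long as the queue stays stopped. Below is a minimal sketch of the same pattern, assuming the old unsigned-long timer API; the demo_* names and the empty demo_poll_tx() stub are hypothetical stand-ins, not driver code:

#include <linux/netdevice.h>
#include <linux/timer.h>
#include <rdma/ib_verbs.h>

struct demo_priv {
	spinlock_t lock;
	struct timer_list poll_timer;
};

/* hypothetical stand-in for the driver's poll_tx(): reap send WCs */
static void demo_poll_tx(struct demo_priv *priv)
{
}

static void demo_tx_timer_func(unsigned long ctx)
{
	struct net_device *dev = (struct net_device *)ctx;
	struct demo_priv *priv = netdev_priv(dev);
	unsigned long flags;

	netif_tx_lock(dev);		/* serialize with the xmit path */
	spin_lock_irqsave(&priv->lock, flags);
	demo_poll_tx(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* queue still throttled: poll again on the next jiffy */
	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);
	netif_tx_unlock(dev);
}

/* the CQ handler does no CQ work itself; it only arms the timer */
static void demo_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct demo_priv *priv = netdev_priv((struct net_device *)dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}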
static inline int post_send(struct xve_dev_priv *priv,
/* Always called with priv->lock held
 * The type argument is used to differentiate between GATEWAY
 * and UVNIC packets:
- * 1 -> GATEWAY PACKET
* 0 -> normal UVNIC PACKET
+ * 1 -> GATEWAY broadcast PACKET
+ * 2 -> queued GATEWAY PACKET
+ * 3 -> path-not-found PACKET
*/
int xve_send(struct net_device *dev, struct sk_buff *skb,
struct xve_ah *address, u32 qpn, int type)
int hlen;
void *phead;
int ret = NETDEV_TX_OK;
+ u8 packet_sent = 0;
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
netif_stop_queue(dev);
}
- if (netif_queue_stopped(dev)) {
- int rc;
-
- rc = ib_req_notify_cq(priv->send_cq,
- IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
- if (rc < 0)
- xve_warn(priv, "request notify on send CQ failed\n");
- else if (rc)
- poll_tx(priv);
- }
skb_orphan(skb);
skb_dst_drop(skb);
--priv->tx_outstanding;
priv->counters[XVE_TX_RING_FULL_COUNTER]++;
xve_put_ah_refcnt(address);
+ xve_free_txbuf_memory(priv, tx_req);
if (netif_queue_stopped(dev)) {
priv->counters[XVE_TX_WAKE_UP_COUNTER]++;
netif_wake_queue(dev);
}
- xve_free_txbuf_memory(priv, tx_req);
- } else
+ } else {
+ packet_sent = 1;
++priv->tx_head;
+ }
priv->send_hbeat_flag = 0;
- if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK))
+ if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK)) {
+ priv->counters[XVE_TX_WMARK_REACH_COUNTER]++;
poll_tx(priv);
+ }
+
+ if (packet_sent)
+ priv->counters[XVE_TX_COUNTER]++;
return ret;
}
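Read together with the call sites below, the four type codes select distinct send paths. An illustrative summary enum, not from the driver source (the names are hypothetical; only the numeric values and their meanings come from this patch):

/* hypothetical names; values and meanings per the xve_send() comment */
enum xve_send_type {
	XVE_SEND_UVNIC     = 0,	/* normal UVNIC packet                 */
	XVE_SEND_GW_BCAST  = 1,	/* GATEWAY broadcast packet            */
	XVE_SEND_GW_QUEUED = 2,	/* queued GATEWAY packet being flushed */
	XVE_SEND_NO_PATH   = 3,	/* packet sent without a resolved path */
};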
xve_debug(DEBUG_IBDEV_INFO, priv, "%s All sends and receives done\n",
__func__);
timeout:
+ xve_warn(priv, "Deleting TX timer\n");
+ del_timer_sync(&priv->poll_timer);
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
xve_warn(priv, "Failed to modify QP to RESET state\n");
return -ENODEV;
}
+ setup_timer(&priv->poll_timer, xve_ib_tx_timer_func,
+ (unsigned long)dev);
+
if (dev->flags & IFF_UP) {
if (xve_ib_dev_open(dev) != 0) {
xve_transport_dev_cleanup(dev);
skb->dev = dev;
xve_get_ah_refcnt(path->ah);
/* Sending the queued GATEWAY Packet */
- ret = xve_send(dev, skb, path->ah, priv->gw.t_data_qp, 1);
+ ret = xve_send(dev, skb, path->ah, priv->gw.t_data_qp, 2);
if (ret == NETDEV_TX_BUSY) {
xve_warn(priv, "send queue full full, dropping packet for %s\n",
priv->xve_name);
fwt_entry = xve_fwt_lookup(&priv->xve_fwt, eth_hdr(skb)->h_dest,
vlan_tag, 0);
if (!fwt_entry) {
- if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+ if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
+ ret = xve_mcast_send(dev,
+ (void *)priv->bcast_mgid.raw, skb, 1);
+ priv->counters[XVE_TX_BCAST_PKT]++;
+ goto stats;
+ } else if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+ /* For now, send multicast packets to the G/W as well */
ret = xve_mcast_send(dev,
- (void *)priv->bcast_mgid.raw, skb);
+ (void *)priv->bcast_mgid.raw, skb, 1);
priv->counters[XVE_TX_MCAST_PKT]++;
goto stats;
} else {
if (bcast_skb != NULL)
ret = xve_mcast_send(dev,
(void *)priv->bcast_mgid.
- raw, bcast_skb);
+ raw, bcast_skb, 1);
/*
* Now send the original packet also to over broadcast
* Later add counters for flood mode
if (xve_is_edr(priv) ||
len < XVE_UD_MTU(priv->max_ib_mtu)) {
ret = xve_mcast_send(dev,
- (void *)priv->bcast_mgid.raw, skb);
+ (void *)priv->bcast_mgid.raw, skb, 1);
priv->counters[XVE_TX_MCAST_FLOOD_UD]++;
} else {
if (xve_flood_rc) {
xve_debug(DEBUG_SEND_INFO, priv, "%s path ah is %p\n",
__func__, path->ah);
xve_get_ah_refcnt(path->ah);
- ret = xve_send(dev, skb, path->ah, fwt_entry->dqpn, 0);
+ ret = xve_send(dev, skb, path->ah, fwt_entry->dqpn, 3);
priv->counters[XVE_TX_UD_COUNTER]++;
goto stats;
}
stats:
INC_TX_PKT_STATS(priv, dev);
INC_TX_BYTE_STATS(priv, dev, len);
- priv->counters[XVE_TX_COUNTER]++;
free_fwt_ctx:
if (path)
xve_put_path(path, 0);
handle_action_flags(priv);
if (priv->send_hbeat_flag) {
- unsigned long flags = 0;
-
- if (unlikely(priv->tx_outstanding > SENDQ_LOW_WMARK)) {
- netif_tx_lock(priv->netdev);
- spin_lock_irqsave(&priv->lock, flags);
- poll_tx(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- netif_tx_unlock(priv->netdev);
- }
if (xve_is_ovn(priv))
xve_send_hbeat(priv);
}
return 0;
}
-int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
+int xve_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb,
+ u8 broadcast)
{
struct xve_dev_priv *priv = netdev_priv(dev);
struct xve_mcast *mcast;
return ret;
}
- if (xve_is_uplink(priv) && xve_gw_linkup(priv)) {
+ if (broadcast && (xve_is_uplink(priv) && xve_gw_linkup(priv))) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (nskb)
+ if (nskb) {
ret = xve_gw_send(dev, nskb);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+ }
}
mcast = __xve_mcast_find(dev, mgid);
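The new broadcast flag lets callers decide whether a multicast send is also flooded to the gateway: the transmit-path callers above pass 1, and a caller may pass 0 to stay on the IB fabric. A sketch of that gateway leg as a hypothetical helper, demo_gw_leg(), assuming xve_gw_send() consumes its skb and returns NETDEV_TX_* codes as the hunk above implies:

/* hypothetical helper extracting the gateway leg of xve_mcast_send() */
static int demo_gw_leg(struct net_device *dev, struct xve_dev_priv *priv,
		       struct sk_buff *skb, u8 broadcast)
{
	int ret = NETDEV_TX_OK;

	if (broadcast && xve_is_uplink(priv) && xve_gw_linkup(priv)) {
		/* clone so the multicast leg keeps the original skb */
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (nskb)
			ret = xve_gw_send(dev, nskb);
	}
	return ret;
}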
"tx drop oper down count:\t",
"tx drop skb error count:\t",
"tx drop ring full count:\t",
+ "tx ring wmark reached count:\t",
"tx wake up count\t\t",
"tx queue stop count:\t\t",
"rx_skb_count:\t\t\t",
"tx ud count:\t\t\t",
"tx rc count:\t\t\t",
"tx mcast count:\t\t\t",
+ "tx broadcast count:\t\t\t",
"tx arp count:\t\t\t",
"tx ndp count:\t\t\t",
"tx arp vlan count:\t\t",
"ib lid_active count:\t\t",
"ib pkey_change count:\t\t",
"ib invalid count:\t\t",
- "uplink unicast:\t\t\t",
+ "tx uplink broadcast:\t\t\t",
"Heartbeat Count(0x8919):\t\t",
"Link State message count:\t",
"RX frames without GRH\t\t",
seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
+ seq_printf(m, "Completion Queue size: \t\t%d\n", vp->xve_max_send_cqe);
if (vp->cm_supported) {
seq_printf(m, "Num of cm frags: \t\t%d\n", vp->cm.num_frags);