xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
xscore_stats.o xscore_uadm.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
break;
default:
kfree(xwork->msg);
- XSMP_ERROR("%s: Unknown message type: %d\n", __func__,
+ XSMP_INFO("%s: Unknown message type: %d\n", __func__,
m_header->type);
break;
}
vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o \
vhba_scsi_intf.o vhba_align.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
xsvnic-y := xsvnic_main.o xsvnic_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
xve_ethtool.o xve_cm.o xve_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8033\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8034\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
XVE_STATE_MACHINE_DOWN,
XVE_STATE_MACHINE_IBCLEAR,
XVE_NAPI_POLL_COUNTER,
+ XVE_NAPI_DROP_COUNTER,
XVE_SHORT_PKT_COUNTER,
XVE_TX_COUNTER,
XVE_TX_SKB_FREE_COUNTER,
XVE_PATHREC_QUERY_COUNTER,
XVE_PATHREC_RESP_COUNTER,
XVE_PATHREC_RESP_ERR_COUNTER,
+ XVE_PATHREC_GW_COUNTER,
XVE_SM_CHANGE_COUNTER,
XVE_CLIENT_REREGISTER_COUNTER,
DEBUG_QP_INFO = 0x00040000,
DEBUG_TX_INFO = 0x00080000,
DEBUG_RX_INFO = 0x00100000,
- DEBUG_TXDATA_INFO = 0x00200000
+ DEBUG_TXDATA_INFO = 0x00200000,
+ DEBUG_INSTALL_INFO = 0x00400000,
+ DEBUG_FWTABLE_INFO = 0x00800000
};
#define XVE_OP_RECV (1ul << 31)
#define DRV_PRINT(fmt, arg...) \
PRINT(KERN_INFO, "DRV", fmt, ##arg)
#define xve_printk(level, priv, format, arg...) \
- printk(level "%s: " format, \
+ printk(level "%s: " format "\n", \
((struct xve_dev_priv *) priv)->netdev->name, \
## arg)
#define xve_warn(priv, format, arg...) \
xve_printk(KERN_WARNING, priv, format, ## arg)
+#define xve_info(priv, format, arg...) \
+ do { \
+ if (xve_debug_level & DEBUG_DRV_INFO) \
+ xve_printk(KERN_INFO, priv, format, \
+ ## arg); \
+ } while (0)
#define XSMP_INFO(fmt, arg...) \
do { \
do { \
if (xve_debug_level & level) { \
if (priv) \
- printk("%s: " format, \
+ pr_info("%s: " format "\n", \
((struct xve_dev_priv *) priv)->netdev->name, \
## arg); \
else \
- printk("XVE: " format, ## arg); \
+ pr_info("XVE: " format "\n", ## arg); \
} \
} while (0)
if (!(xve_debug_level & DEBUG_TEST_INFO))
return;
- printk("%s. Packet length is %d\n", name, length);
+ pr_info("%s. Packet length is %d\n", name, length);
tmp_len = (length >> 2) + 1;
data_ptr = (u32 *) buff;
for (i = 0; i < tmp_len; i++) {
.opcode = IB_WR_SEND,
};
-static int xve_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int xve_cm_tx_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event);
static void __xve_cm_tx_reap(struct xve_dev_priv *priv);
static void xve_cm_dma_unmap_rx(struct xve_dev_priv *priv, int frags,
ret = ib_post_srq_recv(priv->cm.srq, wr, &bad_wr);
if (unlikely(ret)) {
- xve_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
+ xve_warn(priv, "post srq failed for buf %d (%d)", id, ret);
xve_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
priv->cm.srq_ring[id].mapping);
dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
skb = xve_dev_alloc_skb(priv, XVE_CM_HEAD_SIZE + NET_IP_ALIGN);
if (unlikely(!skb)) {
- xve_warn(priv, "%s Failed to allocate skb\n", __func__);
+ xve_warn(priv, "%s Failed to allocate skb", __func__);
return NULL;
}
ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page.p,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) {
- xve_warn(priv, "%s Failed to Map page\n", __func__);
+ xve_warn(priv, "%s Failed to Map page", __func__);
goto partial_error;
}
}
*/
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
if (ib_post_send(p->qp, &xve_cm_rx_drain_wr, &bad_wr))
- xve_warn(priv, "failed to post drain wr\n");
+ xve_warn(priv, "failed to post drain wr");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
qp_attr.qp_state = IB_QPS_INIT;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
+ xve_warn(priv, "failed to init QP attr for INIT: %d", ret);
return ret;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to modify QP to INIT: %d\n", ret);
+ xve_warn(priv, "failed to modify QP to INIT: %d", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+ xve_warn(priv, "failed to init QP attr for RTR: %d", ret);
return ret;
}
qp_attr.rq_psn = psn;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+ xve_warn(priv, "failed to modify QP to RTR: %d", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ xve_warn(priv, "failed to init QP attr for RTS: %d", ret);
return 0;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ xve_warn(priv, "failed to modify QP to RTS: %d", ret);
return 0;
}
return ib_send_cm_rep(cm_id, &rep);
}
-static int xve_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int xve_cm_req_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event)
{
struct net_device *dev = cm_id->context;
struct xve_dev_priv *priv = netdev_priv(dev);
char print[512];
print_mgid_buf(print, (char *)dgid->raw);
- pr_info("XVE: %s Adding Rx QP to the path %s\n",
- priv->xve_name, print);
+ pr_info("XVE: %s Adding Rx QP%x to the path %s ctx:%p\n",
+ priv->xve_name, p->qp->qp_num, print, p);
path->cm_ctx_rx = p;
} else {
priv->counters[XVE_PATH_NOT_SETUP]++;
ret = xve_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
if (ret) {
- xve_warn(priv, "failed to send REP: %d\n", ret);
+ xve_warn(priv, "failed to send REP: %d", ret);
if (ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE))
- xve_warn(priv, "unable to move qp to error state\n");
+ xve_warn(priv, "unable to move qp to error state");
}
return 0;
return ret;
}
-static int xve_cm_rx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int xve_cm_rx_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event)
{
struct xve_cm_ctx *p;
struct xve_dev_priv *priv;
p = cm_id->context;
priv = netdev_priv(p->netdev);
if (ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE))
- xve_warn(priv, "unable to move qp to error state\n");
+ xve_warn(priv, "unable to move qp to error state");
/* Fall through */
default:
return 0;
/* Adjust length of skb with fragments to match received data */
-static inline void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
- unsigned int length, struct sk_buff *toskb)
+static inline void skb_put_frags(struct sk_buff *skb,
+ unsigned int hdr_space,
+ unsigned int length, struct sk_buff *toskb)
{
int i, num_frags;
unsigned int size;
struct sk_buff *small_skb;
u16 vlan;
- xve_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
+ xve_dbg_data(priv, "cm recv completion: id %d, status: %d",
wr_id, wc->status);
if (unlikely(wr_id >= priv->xve_recvq_size)) {
spin_unlock_irqrestore(&priv->lock, flags);
} else
xve_warn(priv,
- "cm recv completion event with wrid %d (> %d)\n",
+ "cm recv completion event with wrid %d (> %d)",
wr_id, priv->xve_recvq_size);
return;
}
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
- if (!test_bit(XVE_DELETING, &priv->state)) {
- pr_err("%s: cm recv error", priv->xve_name);
- pr_err("(status=%d, wrid=%d", wc->status, wr_id);
- pr_err("vend_err %x)\n", wc->vendor_err);
- }
+ if (!test_bit(XVE_DELETING, &priv->state))
+ xve_dbg_data(priv,
+ "cm recv err QP%x status:%d wr:%d vendor_err%x",
+ wc->qp->qp_num, wc->status, wr_id,
+ wc->vendor_err);
INC_RX_DROP_STATS(priv, dev);
goto repost;
}
priv->counters[XVE_RC_RXCOMPL_COUNTER]++;
xve_send_skb(priv, skb);
repost:
- if (unlikely(xve_cm_post_receive_srq(dev, wr_id))) {
- xve_warn(priv, "xve_cm_post_receive_srq failed ");
- xve_warn(priv, "for buf %d\n", wr_id);
- }
+ if (unlikely(xve_cm_post_receive_srq(dev, wr_id)))
+ xve_warn(priv, "cm post srq failed for buf %d", wr_id);
}
static inline int post_send(struct xve_dev_priv *priv,
struct xve_cm_buf *tx_req;
u64 addr;
int ret = NETDEV_TX_OK;
+ u32 wr_id;
if (unlikely(skb->len > tx->mtu + VLAN_ETH_HLEN)) {
xve_warn(priv,
- "packet len %d (> %d) too long to send, dropping\n",
+ "packet len %d (> %d) too long to send, dropping",
skb->len, tx->mtu);
INC_TX_DROP_STATS(priv, dev);
INC_TX_ERROR_STATS(priv, dev);
}
xve_dbg_data(priv,
- "sending packet: head 0x%x length %d connection 0x%x\n",
+ "sending packet: head 0x%x length %d connection 0x%x",
tx->tx_head, skb->len, tx->qp->qp_num);
/*
* means we have to make sure everything is properly recorded and
* our state is consistent before we call post_send().
*/
- tx_req = &tx->tx_ring[tx->tx_head & (priv->xve_sendq_size - 1)];
+ wr_id = tx->tx_head & (priv->xve_sendq_size - 1);
+ tx_req = &tx->tx_ring[wr_id];
tx_req->skb = skb;
addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
}
tx_req->mapping[0] = addr;
- if (unlikely(post_send(priv, tx, tx->tx_head &
- (priv->xve_sendq_size - 1),
+ if (unlikely(post_send(priv, tx, wr_id,
addr, skb->len))) {
- xve_warn(priv, "post_send failed\n");
+ xve_warn(priv, "QP[%d] post_send failed wr_id:%d ctx:%p",
+ tx->qp->qp_num, wr_id, tx);
INC_TX_ERROR_STATS(priv, dev);
xve_cm_tx_buf_free(priv, tx_req);
} else {
tx->qp->qp_num);
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
xve_warn(priv,
- "request notify on send CQ failed\n");
+ "request notify on send CQ failed");
priv->counters[XVE_TX_RING_FULL_COUNTER]++;
priv->counters[XVE_TX_QUEUE_STOP_COUNTER]++;
netif_stop_queue(dev);
wr_id, wc->status);
if (unlikely(wr_id >= priv->xve_sendq_size)) {
- xve_warn(priv, "cm send completion event with wrid %d (> %d)\n",
+ xve_warn(priv, "cm send completion event with wrid %d (> %d)",
wr_id, priv->xve_sendq_size);
return;
}
}
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) {
- pr_err("%s: failed cm send event ", priv->xve_name);
- pr_err("(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
+ xve_warn(priv, "QP[%x] failed cm send event status:%d wrid:%d vend_err:%x",
+ wc->qp->qp_num, wc->status, wr_id,
+ wc->vendor_err);
+ else
+ xve_debug(DEBUG_CM_INFO, priv, "QP[%x] status:%d wrid:%d vend_err:%x",
+ wc->qp->qp_num, wc->status, wr_id,
+ wc->vendor_err);
xve_cm_destroy_tx_deferred(tx);
}
netif_tx_unlock(dev);
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE);
if (ret)
- xve_warn(priv, "unable to move qp to error state: %d\n",
- ret);
+ xve_warn(priv, "QP[%x] unable to move error state[%d]",
+ p->qp ? p->qp->qp_num : 0, ret);
spin_lock_irq(&priv->lock);
}
!list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list)) {
if (time_after(jiffies, begin + 5 * HZ)) {
- xve_warn(priv, "RX drain timing out\n");
+ xve_warn(priv, "RX drain timing out");
/*
* assume the HW is wedged and just free up everything.
p->mtu = be32_to_cpu(data->mtu);
if (p->mtu <= ETH_HLEN) {
- xve_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+ xve_warn(priv, "Rejecting connection: mtu %d <= %d",
p->mtu, ETH_HLEN);
return -EINVAL;
}
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+ xve_warn(priv, "failed to init QP attr for RTR: %d", ret);
return ret;
}
qp_attr.rq_psn = 0; /* FIXME */
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+ xve_warn(priv, "failed to modify QP to RTR: %d", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ xve_warn(priv, "failed to init QP attr for RTS: %d", ret);
return ret;
}
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ xve_warn(priv, "failed to modify QP to RTS: %d", ret);
return ret;
}
skb->dev = p->netdev;
if (dev_queue_xmit(skb)) {
xve_warn(priv, "dev_queue_xmit failed ");
- xve_warn(priv, "to requeue packet\n");
+ xve_warn(priv, "to requeue packet");
} else {
xve_dbg_data(priv, "%s Succefully sent skb\n",
__func__);
ret = ib_send_cm_rtu(cm_id, NULL, 0);
if (ret) {
- xve_warn(priv, "failed to send RTU: %d\n", ret);
+ xve_warn(priv, "failed to send RTU: %d", ret);
return ret;
}
return 0;
ret =
ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
if (ret) {
- xve_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
+ xve_warn(priv, "pkey 0x%x not found: %d", priv->pkey, ret);
return ret;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
- xve_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
+ xve_warn(priv, "failed to modify tx QP to INIT: %d", ret);
return ret;
}
return 0;
p->tx_ring = vmalloc(priv->xve_sendq_size * sizeof(*p->tx_ring));
- if (IS_ERR(p->tx_ring)) {
- xve_warn(priv, "failed to allocate tx ring\n");
+ if (!p->tx_ring) {
+ xve_warn(priv, "failed to allocate tx ring");
ret = -ENOMEM;
goto err_tx;
}
p->qp = xve_cm_create_tx_qp(p->netdev, p);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
- xve_warn(priv, "failed to allocate tx qp: %d\n", ret);
+ xve_warn(priv, "failed to allocate tx qp: %d", ret);
goto err_qp;
}
p->id = ib_create_cm_id(priv->ca, xve_cm_tx_handler, p);
if (IS_ERR(p->id)) {
ret = PTR_ERR(p->id);
- xve_warn(priv, "failed to create tx cm id: %d\n", ret);
+ xve_warn(priv, "failed to create tx cm id: %d", ret);
goto err_id;
}
ret = xve_cm_modify_tx_init(p->netdev, p->id, p->qp);
if (ret) {
- xve_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
+ xve_warn(priv, "failed to modify tx qp to rtr: %d", ret);
goto err_modify;
}
ret = xve_cm_send_req(p->netdev, p->id, p->qp, pathrec);
if (ret) {
- xve_warn(priv, "failed to send cm req: %d\n", ret);
+ xve_warn(priv, "failed to send cm req: %d", ret);
goto err_send_cm;
}
- xve_debug(DEBUG_CM_INFO, priv, "%s Request connection", __func__);
- xve_debug(DEBUG_CM_INFO, priv, "0x%x for gid", p->qp->qp_num);
- xve_debug(DEBUG_CM_INFO, priv, "%pI6 net_id 0x%x\n", pathrec->dgid.raw,
- priv->net_id);
-
+ pr_info("%s QP[%x] Tx Created path %pI6 ctx:%p\n", priv->xve_name,
+ p->qp->qp_num, pathrec->dgid.raw, p);
return 0;
err_send_cm:
struct xve_cm_buf *tx_req;
unsigned long begin;
unsigned long flags = 0;
+ u32 qp_num = p->qp ? p->qp->qp_num : 0;
+
+ xve_debug(DEBUG_CM_INFO, priv,
+ "QP[%x] ctx:%p Destroy active conn head[0x%x] tail[0x%x]",
+ qp_num, p, p->tx_head, p->tx_tail);
- xve_debug(DEBUG_CM_INFO, priv, "%s Destroy active conn", __func__);
- xve_debug(DEBUG_CM_INFO, priv, "0x%x head", p->qp ? p->qp->qp_num : 0);
- xve_debug(DEBUG_CM_INFO, priv, " 0x%x tail 0x%x\n", p->tx_head,
- p->tx_tail);
if (p->id)
ib_destroy_cm_id(p->id);
while ((int)p->tx_tail - (int)p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (priv->xve_sendq_size - 1)];
-
++p->tx_tail;
spin_unlock_irqrestore(&priv->lock, flags);
}
spin_unlock_irqrestore(&priv->lock, flags);
- pr_info("%s [xve %s] Destroyed active con", __func__, priv->xve_name);
- pr_info("qp [0x%x] head", p->qp ? p->qp->qp_num : 0);
- pr_info("0x%x tail 0x%x\n", p->tx_head, p->tx_tail);
+ xve_warn(priv, "QP[%x] Destroyed, head[0x%x] tail[0x%x]",
+ qp_num, p->tx_head, p->tx_tail);
if (p->qp)
ib_destroy_qp(p->qp);
if (p->tx_ring)
kfree(p);
}
-static int xve_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int xve_cm_tx_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event)
{
struct xve_cm_ctx *tx = cm_id->context;
struct xve_dev_priv *priv;
dev = priv->netdev;
switch (event->event) {
case IB_CM_DREQ_RECEIVED:
- xve_debug(DEBUG_CM_INFO, priv, "%s DREQ received QP %x\n",
+ xve_debug(DEBUG_CM_INFO, priv, "%s DREQ received QP %x",
__func__, tx->qp ? tx->qp->qp_num : 0);
ib_send_cm_drep(cm_id, NULL, 0);
break;
case IB_CM_REP_RECEIVED:
- xve_debug(DEBUG_CM_INFO, priv, "%s REP received QP %x\n",
+ xve_debug(DEBUG_CM_INFO, priv, "%s REP received QP %x",
__func__, tx->qp ? tx->qp->qp_num : 0);
ret = xve_cm_rep_handler(cm_id, event);
if (ret)
case IB_CM_REQ_ERROR:
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
- pr_info("%s CM event %d [dev %s] QP %x\n", __func__,
+ pr_info("%s CM event %d [dev %s] QP %x", __func__,
event->event, dev->name, tx->qp ? tx->qp->qp_num : 0);
netif_tx_lock_bh(dev);
/*
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &xve_cm_err_attr, IB_QP_STATE);
if (ret)
- xve_warn(priv, "unable to move qp to error state: %d\n",
+ xve_warn(priv, "unable to move qp to error state: %d",
ret);
spin_lock_irq(&priv->lock);
}
min_t(int,
ALIGN((priv->admin_mtu + VLAN_ETH_HLEN),
PAGE_SIZE) / PAGE_SIZE, attr.max_srq_sge);
- xve_debug(DEBUG_CM_INFO, priv, "%s max_srq_sge=%d\n", __func__,
+ xve_debug(DEBUG_CM_INFO, priv, "%s max_srq_sge=%d", __func__,
attr.max_srq_sge);
xve_cm_create_srq(dev, attr.max_srq_sge);
priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x20;
priv->cm.num_frags = attr.max_srq_sge;
xve_debug(DEBUG_CM_INFO, priv,
- "%s max_cm_mtu = 0x%x, num_frags=%d\n", __func__,
+ "%s max_cm_mtu = 0x%x, num_frags=%d", __func__,
priv->cm.max_cm_mtu, priv->cm.num_frags);
} else {
pr_notice("XVE: Non-SRQ mode not supported\n");
priv->cm.
srq_ring[i].mapping)) {
xve_warn(priv,
- "%s failed to allocate rc ",
- __func__);
- xve_warn(priv,
- "receive buffer %d\n", i);
+ "%s failed to allocate rbuf rc%d",
+ __func__, i);
xve_cm_dev_cleanup(dev);
return -ENOMEM;
}
if (xve_cm_post_receive_srq(dev, i)) {
- xve_warn(priv, "xve_cm_post_receive_srq ");
- xve_warn(priv, "failed for buf %d\n", i);
+ xve_warn(priv, "SRQ post failed buf:%d", i);
xve_cm_dev_cleanup(dev);
return -EIO;
}
if (!priv->cm_supported || !priv->cm.srq)
return;
- xve_debug(DEBUG_CM_INFO, priv, "%s Cleanup xve CM\n", __func__);
+ xve_debug(DEBUG_CM_INFO, priv, "%s Cleanup xve CM", __func__);
ret = ib_destroy_srq(priv->cm.srq);
if (ret)
- xve_warn(priv, "ib_destroy_srq failed: %d\n", ret);
+ xve_warn(priv, "ib_destroy_srq failed: %d", ret);
priv->cm.srq = NULL;
if (!priv->cm.srq_ring)
coal->rx_coalesce_usecs);
if (ret) {
- xve_warn(priv, "failed modifying CQ (%d)\n", ret);
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "failed modifying CQ (%d)\n", ret);
return ret;
}
int n, i;
n = ib_poll_cq(priv->recv_cq, num_polls, priv->ibwc);
+ if (n < 0)
+ xve_warn(priv, "%s ib_poll_cq() failed, rc %d",
+ __func__, n);
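+ /* on error n is negative, so the completion loop below does not run */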
for (i = 0; i < n; ++i) {
/*
* Convert any successful completions to flush
done = 0;
- priv->counters[XVE_NAPI_POLL_COUNTER]++;
/*
* If not connected complete it
*/
if (!(test_bit(XVE_OPER_UP, &priv->state) ||
test_bit(XVE_HBEAT_LOST, &priv->state))) {
+ priv->counters[XVE_NAPI_DROP_COUNTER]++;
napi_complete(&priv->napi);
clear_bit(XVE_INTR_ENABLED, &priv->state);
return 0;
}
+ priv->counters[XVE_NAPI_POLL_COUNTER]++;
+
poll_more:
while (done < budget) {
int max = (budget - done);
t = min(XVE_NUM_WC, max);
n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
+ if (n < 0)
+ xve_warn(priv, "%s ib_poll_cq() failed, rc %d",
+ __func__, n);
for (i = 0; i < n; i++) {
struct ib_wc *wc = priv->ibwc + i;
void *phead;
int ret = NETDEV_TX_OK;
u8 packet_sent = 0;
+ int id;
+ id = priv->tx_head & (priv->xve_sendq_size - 1);
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
phead = skb->data;
"%s linear data too small dropping %ld packets %s\n",
__func__, dev->stats.tx_dropped,
dev->name);
- INC_TX_DROP_STATS(priv, dev);
- xve_put_ah_refcnt(address);
- dev_kfree_skb_any(skb);
- return ret;
+ goto drop_pkt;
}
} else {
int max_packet_len;
max_packet_len = priv->mcast_mtu + VLAN_ETH_HLEN;
if (unlikely(skb->len > max_packet_len)) {
- xve_warn(priv, "%s packet len %d", __func__, skb->len);
- xve_warn(priv, "(> %d) too long to", max_packet_len);
- xve_warn(priv, "send,dropping %ld packets %s\n",
- dev->stats.tx_dropped, dev->name);
- INC_TX_DROP_STATS(priv, dev);
- xve_put_ah_refcnt(address);
- dev_kfree_skb_any(skb);
- return ret;
+ xve_info(priv,
+ "packet len %d (>%d) too long dropping",
+ skb->len, max_packet_len);
+ goto drop_pkt;
}
phead = NULL;
hlen = 0;
* means we have to make sure everything is properly recorded and
* our state is consistent before we call post_send().
*/
- tx_req = &priv->tx_ring[priv->tx_head & (priv->xve_sendq_size - 1)];
+ tx_req = &priv->tx_ring[id];
tx_req->skb = skb;
tx_req->ah = address;
if (unlikely(xve_dma_map_tx(priv->ca, tx_req))) {
/* Queue almost full */
if (++priv->tx_outstanding == priv->xve_sendq_size) {
xve_dbg_data(priv,
- "%s stop queue head%d out%d tail%d type%d",
- __func__, priv->tx_head, priv->tx_tail,
+ "%s stop queue id%d head%d tail%d out%d type%d",
+ __func__, id, priv->tx_head, priv->tx_tail,
priv->tx_outstanding, type);
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
xve_warn(priv, "%s Req notify on send CQ failed\n",
if (unlikely(post_send(priv, priv->tx_head & (priv->xve_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen))) {
- xve_warn(priv, "%s post_send failed head%d tail%d out%d type%d\n",
- __func__, priv->tx_head, priv->tx_tail,
- priv->tx_outstanding, type);
--priv->tx_outstanding;
+ xve_warn(priv, "%s post_send failed id%d head%d tail%d out%d type%d",
+ __func__, id, priv->tx_head, priv->tx_tail,
+ priv->tx_outstanding, type);
priv->counters[XVE_TX_RING_FULL_COUNTER]++;
xve_put_ah_refcnt(address);
xve_free_txbuf_memory(priv, tx_req);
if (packet_sent)
priv->counters[XVE_TX_COUNTER]++;
return ret;
+
+drop_pkt:
+ INC_TX_DROP_STATS(priv, dev);
+ xve_put_ah_refcnt(address);
+ dev_kfree_skb_any(skb);
+ return ret;
}
static void __xve_reap_ah(struct net_device *dev)
xve_debug(DEBUG_IBDEV_INFO, priv, "%s All sends and receives done\n",
__func__);
timeout:
- xve_warn(priv, "Deleting TX timer");
+ xve_debug(DEBUG_IBDEV_INFO, priv, "Deleting TX timer\n");
del_timer_sync(&priv->poll_timer);
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
struct xve_dev_priv *priv = netdev_priv(netdev);
unsigned long flags = 0;
- pr_info("XVE: %s Bringing interface up %s\n", __func__, priv->xve_name);
+ xve_info(priv, "Bringing interface up");
priv->counters[XVE_OPEN_COUNTER]++;
spin_lock_irqsave(&priv->lock, flags);
struct xve_dev_priv *priv = netdev_priv(netdev);
unsigned long flags = 0;
- pr_info("XVE: %s Stopping interface %s\n", __func__, priv->xve_name);
+ xve_info(priv, "Stopping interface");
spin_lock_irqsave(&priv->lock, flags);
clear_bit(XVE_FLAG_ADMIN_UP, &priv->flags);
xve_xsmp_send_oper_state(priv, priv->resource_id,
XSMP_XVE_OPER_DOWN);
- pr_info("XVE: %s Finished Stopping interface %s\n", __func__,
- priv->xve_name);
+ xve_debug(DEBUG_IBDEV_INFO, priv,
+ "%s Stopped interface %s\n", __func__,
+ priv->xve_name);
return 0;
}
{
struct xve_dev_priv *priv = netdev_priv(netdev);
- pr_info("XVE: %s changing mtu from %d to %d\n",
- priv->xve_name, priv->admin_mtu, new_mtu);
+ xve_info(priv, "changing mtu from %d to %d",
+ priv->admin_mtu, new_mtu);
if (new_mtu == netdev->mtu)
return 0;
while ((skb = __skb_dequeue(&uplink_skqueue))) {
skb->dev = dev;
xve_get_ah_refcnt(path->ah);
+ priv->counters[XVE_PATHREC_GW_COUNTER]++;
/* Sending the queued GATEWAY Packet */
ret = xve_send(dev, skb, path->ah, priv->gw.t_data_qp, 2);
if (ret == NETDEV_TX_BUSY) {
spin_unlock_irqrestore(&priv->lock, flags);
if (test_bit(XVE_OS_ADMIN_UP, &priv->state))
napi_synchronize(&priv->napi);
- pr_info("%s Flushing mcast [xve :%s]\n", __func__,
- priv->xve_name);
+ xve_info(priv, "%s Flushing mcast", __func__);
xve_queue_work(priv, XVE_WQ_START_FLUSHNORMAL);
} else {
spin_unlock_irqrestore(&priv->lock, flags);
xsmp_msg = (struct xve_xsmp_msg *)(msg + sizeof(*header));
if (notifycmd == XSMP_XVE_OPER_UP) {
- pr_info("XVE: %s sending updated mtu for %s[mtu %d]\n",
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "XVE: %s sending updated mtu for %s[mtu %d]\n",
__func__, priv->xve_name, priv->admin_mtu);
xsmp_msg->vn_mtu = cpu_to_be16(priv->admin_mtu);
xsmp_msg->net_id = cpu_to_be32(priv->net_id);
XVE_HBEAT_LOSS_THRES*priv->hb_interval)) {
unsigned long flags = 0;
- xve_warn(priv, "Heart Beat Loss: %lu:%lu\n",
+ xve_info(priv, "Heart Beat Loss: %lu:%lu\n",
jiffies, (unsigned long)priv->last_hbeat +
3*priv->hb_interval*HZ);
priv->netdev->hw_features =
NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_GRO;
- pr_info("XVE: %s %s flags[%x]\n",
- __func__, priv->xve_name, priv->hca_caps);
+ xve_info(priv, "%s HCA capability flags[%x]",
+ __func__, priv->hca_caps);
if (xve_enable_offload & (priv->is_eoib && priv->is_titan)) {
if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- pr_info("XVE: %s Setting checksum offload %s[%x]\n",
- __func__, priv->xve_name, priv->hca_caps);
set_bit(XVE_FLAG_CSUM, &priv->flags);
priv->netdev->hw_features |=
NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
}
- if (priv->hca_caps & IB_DEVICE_UD_TSO) {
- pr_info("XVE: %s Setting TSO offload %s[%x]\n",
- __func__, priv->xve_name, priv->hca_caps);
+ if (priv->hca_caps & IB_DEVICE_UD_TSO)
priv->netdev->hw_features |= NETIF_F_TSO;
- }
-
}
priv->netdev->features |= priv->netdev->hw_features;
priv->lro_mode = 1;
if (priv->vnet_mode == XVE_VNET_MODE_RC) {
- pr_info("XVE: %s Setting RC mode for %s\n", __func__,
- priv->xve_name);
strcpy(priv->mode, "connected(RC)");
set_bit(XVE_FLAG_ADMIN_CM, &priv->flags);
priv->cm_supported = 1;
} else {/* UD */
- pr_info("XVE: %s Setting UD mode for %s\n", __func__,
- priv->xve_name);
strcpy(priv->mode, "datagram(UD)");
/* MTU will be reset when mcast join happens */
priv->netdev->mtu = XVE_UD_MTU(priv->max_ib_mtu);
priv->lro_mode = 0;
}
+ xve_info(priv, "%s Mode:%d MTU:%d", __func__,
+ priv->vnet_mode, priv->netdev->mtu);
priv->mcast_mtu = priv->admin_mtu = priv->netdev->mtu;
xg_setup_pseudo_device(priv->netdev, hca);
int count = 0;
- pr_info("XVE:%s Removing xve interface %s\n", __func__, priv->xve_name);
+ xve_info(priv, "%s Removing xve interface", __func__);
ib_unregister_event_handler(&priv->event_handler);
cancel_delayed_work_sync(&priv->stale_task);
rtnl_lock();
rtnl_unlock();
vmk_notify_uplink(priv->netdev);
unregister_netdev(priv->netdev);
- pr_info("XVE:%s Unregistered xve interface %s\n", __func__,
- priv->xve_name);
+ xve_info(priv, "%s Unregistered xve interface ", __func__);
/* Wait for reference count to go zero */
while (atomic_read(&priv->ref_cnt) && xve_continue_unload()) {
count++;
if (count > 20) {
- pr_info("%s: Waiting for refcnt to become", __func__);
- pr_info("zero [xve: %s] %d\n",
- priv->xve_name, atomic_read(&priv->ref_cnt));
+ xve_info(priv, "Waiting for refcnt to become zero %d",
+ atomic_read(&priv->ref_cnt));
count = 0;
}
msleep(1000);
int ret;
char *str = state == XSMP_XVE_OPER_UP ? "UP" : "DOWN";
- pr_info("XVE: %s Sending OPER state [%d:%s] to %s\n",
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "XVE: %s Sending OPER state [%d:%s] to %s\n",
__func__, state, str, priv->xve_name);
if (state == XSMP_XVE_OPER_UP) {
set_bit(XVE_OPER_REP_SENT, &priv->state);
xmsgp->tca_qkey = cpu_to_be16(priv->gw.t_qkey);
}
}
- pr_info("XVE: %s ACK back with admin mtu ", __func__);
- pr_info("%d for %s", xmsgp->vn_mtu, priv->xve_name);
- pr_info("[netid %d ]\n", xmsgp->net_id);
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "%s ACK back with admin mtu %d for %s [netid %d]",
+ __func__, xmsgp->vn_mtu, priv->xve_name, xmsgp->net_id);
memcpy(msg + sizeof(*m_header), xmsgp, sizeof(*xmsgp));
/*
* Duplicate VID, send ACK, send oper state update
*/
- XSMP_ERROR
- ("%s: Duplicate XVE install message name: %s, VID=0x%llx\n",
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "%s: Duplicate XVE install message name: %s, VID=0x%llx\n",
__func__, xmsgp->xve_name,
be64_to_cpu(xmsgp->resource_id));
ret = -EEXIST;
}
priv = netdev_priv(netdev);
- pr_info("XVE: %s Installing xve %s - ", __func__, xmsgp->xve_name);
- pr_info("resource id %llx", be64_to_cpu(xmsgp->resource_id));
- pr_info("priv DS %p\n", priv);
-
xcpm_get_xsmp_session_info(xsmp_hndl, &priv->xsmp_info);
hca = priv->xsmp_info.ib_device;
port = xscore_port_num(priv->xsmp_info.port);
(xmsgp->vnet_mode);
priv->net_id = be32_to_cpu(xmsgp->net_id);
priv->netdev->mtu = be16_to_cpu(xmsgp->vn_mtu);
- pr_info("XVE: %s MTU %d - ", __func__, priv->netdev->mtu);
priv->resource_id = be64_to_cpu(xmsgp->resource_id);
priv->mp_flag = be16_to_cpu(xmsgp->mp_flag);
priv->install_flag = be32_to_cpu(xmsgp->install_flag);
priv->is_titan = (is_titan) ? 1 : 0;
priv->is_jumbo = (is_jumbo) ? 1 : 0;
+ pr_info("Install VNIC:%s rID:%llx pDS:%p NetId:%d",
+ xmsgp->xve_name, be64_to_cpu(xmsgp->resource_id),
+ priv, priv->net_id);
/* Make Send and Recv Queue parameters Per Vnic */
if (!(priv->vnet_mode & XVE_VNET_MODE_UD)) {
priv->xve_sendq_size = xve_sendq_size;
goto device_init_failed;
}
- pr_info("XVE: %s adding vnic %s ",
- __func__, priv->xve_name);
- pr_info("net_id %d vnet_mode %d type%d eoib[%s]",
- priv->net_id, priv->vnet_mode, priv->vnic_type,
- priv->is_eoib ? "Yes" : "no");
- pr_info("port %d net_id_be %d\n", port, net_id_be);
- pr_info("MTU port%d active%d\n", priv->port_attr.max_mtu,
- priv->port_attr.active_mtu);
memcpy(priv->bcast_mgid.raw, bcast_mgid, sizeof(union ib_gid));
if (xve_is_edr(priv)) {
priv->pkey |= 0x8000;
}
- pr_info("MGID: %pI6 pkey%d\n", &priv->bcast_mgid.raw, priv->pkey);
if (xve_set_dev_features(priv, hca))
goto device_init_failed;
queue_sm_work(priv, 0);
- pr_info("%s Successfully created xve [%s]\n", __func__,
- xmsgp->xve_name);
+ pr_info("%s Install Success: vnet_mode:%d type:%d eoib[%s] HPort:%d\n",
+ priv->xve_name, priv->vnet_mode, priv->vnic_type,
+ priv->is_eoib ? "Yes" : "no", port);
+ pr_info("VNIC:%s MTU[%d:%d:%d] MGID:%pI6 pkey:%d\n", priv->xve_name,
+ priv->netdev->mtu, priv->port_attr.max_mtu,
+ priv->port_attr.active_mtu,
+ &priv->bcast_mgid.raw, priv->pkey);
send_ack:
ret = xve_xsmp_send_ack(priv, xmsgp);
be64_to_cpu(xmsgp->resource_id));
}
if (update_state && priv->vnic_type == XSMP_XCM_OVN) {
- printk
- ("XVE: %s Sending Oper state to chassis for %s id %llx\n",
- __func__, priv->xve_name, priv->resource_id);
+ xve_info(priv, "Sending Oper state to chassis for id %llx\n",
+ priv->resource_id);
(void)xve_xsmp_handle_oper_req(priv->xsmp_hndl,
priv->resource_id);
}
__func__, xmsgp->xve_name);
return -1;
}
- pr_info("XVE VNIC_READY: vnic_type: %u, subnet_prefix: %llx\n",
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "XVE VNIC_READY: vnic_type: %u, subnet_prefix: %llx\n",
priv->vnic_type, priv->gw.t_gid.global.subnet_prefix);
- pr_info("TCA ctrl_qp: %u, data_qp: %u, pkey: %x, qkey: %x\n",
+ xve_debug(DEBUG_INSTALL_INFO, priv,
+ "TCA ctrl_qp: %u, data_qp: %u, pkey: %x, qkey: %x\n",
priv->gw.t_ctrl_qp, priv->gw.t_data_qp,
priv->gw.t_pkey, priv->gw.t_qkey);
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = (priv->is_eoib == 1) ?
priv->port_qkey : priv->qkey;
- xve_warn(priv, "setting remote_qkey %x",
+ xve_dbg_mcast(priv, "setting remote_qkey %x",
priv->tx_wr.wr.ud.remote_qkey);
set_qkey = 1;
spin_unlock_irq(&priv->lock);
if (!xve_cm_admin_enabled(dev)) {
- printk
- ("XVE: %s xve %s dev mtu %d, admin_mtu %d, mcast_mtu %d\n",
+ xve_info(priv,
+ "XVE: %s xve %s dev mtu %d, admin_mtu %d, mcast_mtu %d\n",
__func__, priv->xve_name, priv->netdev->mtu,
priv->admin_mtu, priv->mcast_mtu);
if (!priv->is_jumbo)
mcast = xve_mcast_alloc(dev, 0);
if (!mcast) {
- xve_warn(priv, "unable to allocate memory for ");
- xve_warn(priv, "multicast structure\n");
+ xve_warn(priv,
+ "%s unable to allocate memory", __func__);
INC_TX_DROP_STATS(priv, dev);
dev_kfree_skb_any(skb);
goto out;
"state_machine_down count:\t",
"state_machine_ibclear count:\t",
"napi_poll_count:\t\t",
+ "napi_drop_count:\t\t",
"short_tx_pkt_count:\t\t",
"tx_skb_count:\t\t\t",
"tx skb free count:\t\t",
"pathrec query count:\t\t",
"pathrec resp count:\t\t",
"pathrec resp err count:\t\t",
+ "pathrec gw packet count:\t\t",
"ib sm_change count:\t\t",
"ib client_reregister count:\t",
"ib port_err count:\t\t",
seq_printf(m, "Receive Queue size: \t\t%d\n", vp->xve_recvq_size);
seq_printf(m, "Transmit Queue size: \t\t%d\n", vp->xve_sendq_size);
seq_printf(m, "Receive CQ size: \t\t%d\n", vp->xve_rcq_size);
- seq_printf(m, "Transmit CQ size: \t\t%d\n", vp->xve_scq_size);
+ seq_printf(m, "TX CQ size:\t\t\t%d\n", vp->xve_scq_size);
if (vp->cm_supported) {
seq_printf(m, "Num of cm frags: \t\t%d\n", vp->cm.num_frags);
else
strcat(tmp_buf, " + IB Device Not Opened");
+ if (test_bit(XVE_HBEAT_LOST, &vp->state))
+ strcat(tmp_buf, " + HeartBeat Lost");
+ else
+ strcat(tmp_buf, " + HeartBeat Active");
+
if (test_bit(XVE_OVER_QUOTA, &vp->state))
strcat(tmp_buf, " + No RX Quota");
if (vp->work_queue_failed != 0)
seq_printf(m, "WQ Failed:\t\t\t%ld\n", vp->work_queue_failed);
+ seq_printf(m, "TX Net queue \t\t%s %d:%d\n",
+ netif_queue_stopped(vp->netdev) ? "stopped" : "active",
+ vp->counters[XVE_TX_WAKE_UP_COUNTER],
+ vp->counters[XVE_TX_QUEUE_STOP_COUNTER]);
+
seq_printf(m, "Counters cleared count:\t\t%u\n", vp->counters_cleared);
if (xve_is_uplink(vp)) {
(XVE_FWT_ENTRY_REFRESH, &fwt_entry->state)
&& ((jiffies - fwt_entry->last_refresh) >=
priv->aging_delay)) {
- pr_info("XVE: %s MAC ", priv->xve_name);
- pr_info("%pM", smac);
- pr_info(" vlan %d Aged out\n",
- fwt_entry->vlan);
+ xve_info(priv,
+ "MAC %pM vlan %d Aged out",
+ smac, fwt_entry->vlan);
/*
* Can there be a race here where path
* becomes a bad address when paths
(fwt_entry->dgid.raw, gid->raw, sizeof(union ib_gid)))) {
print_mgid_buf(from, (char *)fwt_entry->dgid.raw);
print_mgid_buf(to, (char *)gid->raw);
- pr_info("XVE: %s MAC %pM ",
- priv->xve_name, smac);
- pr_info(" vlan %d moved from GID %s to GID %s\n",
- fwt_entry->vlan, from, to);
-
+ xve_debug(DEBUG_FWTABLE_INFO, priv,
+ "MAC %pM vlan %d moved from GID %s to GID %s",
+ smac, fwt_entry->vlan, from, to);
priv->counters[XVE_MAC_MOVED_COUNTER]++;
memcpy(fwt_entry->dgid.raw, gid->raw,
}
memset(fwt_entry, 0, sizeof(struct xve_fwt_entry));
print_mgid_buf(from, (char *)gid->raw);
- pr_info("XVE: %s MAC %pM", priv->xve_name, smac);
- pr_info("vlan %d learned from GID %s, mode: %s QPN %x Fwt %p\n",
+ xve_debug(DEBUG_FWTABLE_INFO, priv,
+ "XVE: %s MAC %pM", priv->xve_name, smac);
+ xve_debug(DEBUG_FWTABLE_INFO, priv,
+ "vlan %d learned from GID %s, mode: %s QPN %x Fwt %p\n",
vlan, from, qpn ? "UD" : "RC", qpn, fwt_entry);
priv->counters[XVE_MAC_LEARN_COUNTER]++;
memcpy(fwt_entry->dgid.raw, gid->raw, sizeof(union ib_gid));
xve_fwt->num--;
}
}
- pr_info("XVE: %s Forwarding table cleaned up for %s",
- __func__, priv->xve_name);
- pr_info("number of entries %d\n", xve_fwt->num);
+ xve_info(priv, "Forwarding table cleaned up entries:%d",
+ xve_fwt->num);
spin_unlock_irqrestore(&xve_fwt->lock, flags);
}
}
/* Bug 24673784 */
if (priv->is_titan && xve_use_hugecq) {
priv->xve_rcq_size = priv->xve_scq_size =
xve_use_hugecq;
} else {
size = priv->xve_sendq_size;
size += priv->xve_recvq_size + 1; /* 1 extra for rx_drain_qp */
priv->xve_rcq_size = size;