if (unlikely(wc->status != IB_WC_SUCCESS)) {
ipoib_dbg(priv, "cm recv error "
- "(status=%d, wrid=%d vend_err %x)\n",
+ "(status=%d, wrid=%d vend_err 0x%x)\n",
wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
/* IB_WC_RNR_RETRY_EXC_ERR error is part of the life cycle, so don't make waves. */
if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
ipoib_warn(priv, "%s: failed cm send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ "(status=%d, wrid=%d vend_err 0x%x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
else
ipoib_dbg(priv, "%s: failed cm send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ "(status=%d, wrid=%d vend_err 0x%x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ "(status=%d, wrid=%d vend_err 0x%x)\n",
wc->status, wr_id, wc->vendor_err);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_qp_state_validate *qp_work;
ipoib_warn(priv, "failed send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ "(status=%d, wrid=%d vend_err 0x%x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
if (!qp_work) {
}
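The ipoib send-completion hunk above keeps only the GFP_ATOMIC allocation and its failure check in context; the point of that work item is to defer the actual QP state inspection to process context, since the completion handler itself runs in atomic context. Below is a minimal sketch of that defer pattern. The names qp_check_work, qp_check_work_fn, schedule_qp_check and my_priv are hypothetical, not ipoib's actual symbols, and system_wq stands in for whatever workqueue the driver really uses.

/* Minimal sketch of deferring a QP state check out of a send-completion
 * handler. All names below are illustrative placeholders.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_priv;                      /* hypothetical per-device private data */

struct qp_check_work {
	struct work_struct work;
	struct my_priv *priv;
};

static void qp_check_work_fn(struct work_struct *work)
{
	struct qp_check_work *qw = container_of(work, struct qp_check_work, work);

	/* Process context: safe to sleep, query the QP state and recover here. */

	kfree(qw);
}

static void schedule_qp_check(struct my_priv *priv)
{
	struct qp_check_work *qw;

	/* The completion handler runs in atomic context, so the allocation
	 * must not sleep (GFP_ATOMIC) and failure has to be tolerated.
	 */
	qw = kzalloc(sizeof(*qw), GFP_ATOMIC);
	if (!qw)
		return;

	qw->priv = priv;
	INIT_WORK(&qw->work, qp_check_work_fn);
	queue_work(system_wq, &qw->work);
}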
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- iser_err("wr id %llx status %d vend_err %x\n",
+ iser_err("wr id %llx status %d vend_err 0x%x\n",
wc->wr_id, wc->status, wc->vendor_err);
else
iser_dbg("flush error: wr id %llx\n", wc->wr_id);
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- isert_err("wr id %llx status %d vend_err %x\n",
+ isert_err("wr id %llx status %d vend_err 0x%x\n",
wc->wr_id, wc->status, wc->vendor_err);
else
isert_dbg("flush error: wr id %llx\n", wc->wr_id);
if (unlikely(wc->status)) {
if (ssk->qp_active) {
sdp_dbg(sk, "Recv completion with error. "
- "Status %d, vendor: %d\n",
+ "Status %d, vendor: 0x%x\n",
wc->status, wc->vendor_err);
sdp_reset(sk);
ssk->qp_active = 0;
if (!test_bit(XVE_DELETING, &priv->state)) {
pr_err("%s: cm recv error", priv->xve_name);
pr_err("(status=%d, wrid=%d", wc->status, wr_id);
- pr_err("vend_err %x)\n", wc->vendor_err);
+ pr_err("vend_err 0x%x)\n", wc->vendor_err);
}
INC_RX_DROP_STATS(priv, dev);
goto repost;
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) {
pr_err("%s: failed cm send event ", priv->xve_name);
- pr_err("(status=%d, wrid=%d vend_err %x)\n",
+ pr_err("(status=%d, wrid=%d vend_err 0x%x)\n",
wc->status, wr_id, wc->vendor_err);
xve_cm_destroy_tx_deferred(tx);
}
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
xve_warn(priv, "failed recv event ");
- xve_warn(priv, "(status=%d, wrid=%d vend_err %x)\n",
+ xve_warn(priv, "(status=%d, wrid=%d vend_err 0x%x)\n",
wc->status, wr_id, wc->vendor_err);
}
xve_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) {
xve_warn(priv, "failed send event ");
- xve_warn(priv, "(status=%d, wrid=%d vend_err %x)\n",
+ xve_warn(priv, "(status=%d, wrid=%d vend_err 0x%x)\n",
wc->status, wr_id, wc->vendor_err);
}
}
conn->c_drop_source = DR_IB_RECV_COMP_ERR;
rds_ib_conn_error(conn, "recv completion "
"<%pI4,%pI4,%d> had status %u "
- "vendor_err %u, disconnecting and "
+ "vendor_err 0x%x, disconnecting and "
"reconnecting\n",
&conn->c_laddr,
&conn->c_faddr,
conn->c_drop_source = DR_IB_SEND_COMP_ERR;
rds_ib_conn_error(conn,
"send completion <%u.%u.%u.%u,%u.%u.%u.%u,%d> status "
- "%u vendor_err %u, disconnecting and reconnecting\n",
+ "%u vendor_err 0x%x, disconnecting and reconnecting\n",
NIPQUAD(conn->c_laddr),
NIPQUAD(conn->c_faddr),
conn->c_tos, wc->status, wc->vendor_err);
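All of the hunks converge on the same logging convention: vendor_err is an opaque, vendor-specific code, so it is printed as hex with an explicit 0x prefix (the sdp and rds hunks also switch from decimal to hex), and statuses that are expected during normal teardown or retry exhaustion (IB_WC_WR_FLUSH_ERR, IB_WC_RNR_RETRY_EXC_ERR) are logged at debug rather than warning severity. Below is a self-contained userspace sketch of that convention; the enum values and struct wc are illustrative stand-ins, not the ib_verbs definitions.

/* Sketch of the logging convention applied by the hunks above.
 * The enum values and struct wc are stand-ins for ib_verbs types.
 */
#include <stdio.h>
#include <stdint.h>

enum wc_status {                /* stand-in for enum ib_wc_status */
	WC_SUCCESS,
	WC_WR_FLUSH_ERR,        /* flush: normal during QP teardown */
	WC_RNR_RETRY_EXC_ERR,   /* RNR retries exhausted: part of the CM life cycle */
	WC_REM_ACCESS_ERR,      /* example of an unexpected error */
};

struct wc {                     /* stand-in for the relevant struct ib_wc fields */
	enum wc_status status;
	uint32_t vendor_err;
	uint64_t wr_id;
};

static void log_failed_wc(const struct wc *wc)
{
	if (wc->status == WC_SUCCESS)
		return;

	/* Expected statuses stay at debug level; everything else warns.
	 * vendor_err is an opaque vendor code, so print it as 0x%x.
	 */
	if (wc->status == WC_WR_FLUSH_ERR || wc->status == WC_RNR_RETRY_EXC_ERR)
		printf("debug: wr id %llx status %d vend_err 0x%x\n",
		       (unsigned long long)wc->wr_id, (int)wc->status,
		       (unsigned int)wc->vendor_err);
	else
		printf("warn: wr id %llx status %d vend_err 0x%x\n",
		       (unsigned long long)wc->wr_id, (int)wc->status,
		       (unsigned int)wc->vendor_err);
}

int main(void)
{
	struct wc rnr = { .status = WC_RNR_RETRY_EXC_ERR, .vendor_err = 0x81, .wr_id = 42 };
	struct wc bad = { .status = WC_REM_ACCESS_ERR, .vendor_err = 0x22, .wr_id = 43 };

	log_failed_wc(&rnr);    /* debug: wr id 2a status 2 vend_err 0x81 */
	log_failed_wc(&bad);    /* warn: wr id 2b status 3 vend_err 0x22 */
	return 0;
}

The 0x prefix makes it unambiguous that the value is hexadecimal, which is how vendor error codes are normally documented, while the debug/warn split keeps routine connection teardown from flooding the log.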