unsigned long flags;
u32 mtu;
u16 caps;
+ /* Used when checking for need to linearize SKBs with many frags */
+ unsigned max_send_sge;
};
struct ipoib_cm_rx_buf {
int hca_caps;
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
+ /* Used when checking for need to linearize SKBs with many frags */
+ unsigned max_send_sge;
+ /* Device specific; obtained from query_device */
+ unsigned max_sge;
};
struct ipoib_ah {
static inline void ipoib_unregister_debugfs(void) { }
#endif
+/* Resolve a priv pointer to its netdev name (for log prefixes) */
+#define ipoib_dev_name(priv) (((struct ipoib_dev_priv *)(priv))->dev->name)
#define ipoib_printk(level, priv, format, arg...) \
- printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
+ printk(level "%s: " format, ipoib_dev_name(priv), ## arg)
#define ipoib_warn(priv, format, arg...) \
ipoib_printk(KERN_WARNING, priv, format , ## arg)
+#define ipoib_warn_ratelimited(priv, format, arg...) \
+ pr_warn_ratelimited("%s: " format, ipoib_dev_name(priv), ## arg)
+
extern int ipoib_sendq_size;
extern int ipoib_recvq_size;
extern const char ipoib_driver_version[];
+/**
+ * ipoib_linearize_skb - ensure an skb fits within the QP's send SGE limit
+ * @dev:  net device, used only to account tx_dropped/tx_errors on failure
+ * @skb:  packet to check (consumed on failure)
+ * @priv: IPoIB private data, used for the ratelimited warning prefix
+ * @max_send_sge: max scatter/gather entries the send queue supports
+ *
+ * If the skb has more page fragments than the available SGEs can carry,
+ * try to linearize it.  On any failure the skb is freed and the device
+ * drop/error counters are bumped.
+ *
+ * Return: 0 if the skb is now sendable, -1 if it was dropped and freed.
+ */
+static inline int ipoib_linearize_skb(struct net_device *dev,
+				      struct sk_buff *skb,
+				      struct ipoib_dev_priv *priv,
+				      unsigned int max_send_sge)
+{
+	/* The linear (head) portion, if non-empty, consumes one SGE. */
+	unsigned int usable_sge = max_send_sge - !!skb_headlen(skb);
+
+	if (skb_shinfo(skb)->nr_frags <= usable_sge)
+		return 0;
+
+	if (skb_linearize(skb) < 0) {
+		ipoib_warn_ratelimited(priv,
+				       "skb could not be linearized\n");
+		goto drop;
+	}
+
+	/* skb_linearize succeeded yet nr_frags is still above the limit. */
+	if (skb_shinfo(skb)->nr_frags > usable_sge) {
+		ipoib_warn_ratelimited(priv,
+				       "too many frags after skb linearize\n");
+		goto drop;
+	}
+
+	return 0;
+
+drop:
+	++dev->stats.tx_dropped;
+	++dev->stats.tx_errors;
+	dev_kfree_skb_any(skb);
+	return -1;
+}
+
+
#endif /* _IPOIB_H */
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
+ if (ipoib_linearize_skb(dev, skb, priv, tx->max_send_sge) < 0)
+ return;
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ attr.cap.max_send_sge =
+ min_t(u32, priv->max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
+ tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
+ priv->max_sge = attr.max_sge;
attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
ipoib_cm_create_srq(dev, attr.max_srq_sge);
if (ipoib_cm_has_srq(dev)) {
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ init_attr.cap.max_send_sge =
+ min_t(u32, priv->max_sge, MAX_SKB_FRAGS + 1);
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
return 0;
out_free_send_cq: