return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+       /* Check whether we will ever have enough descriptors,
+        * as gso_segs can exceed the current ring size
+        */
+       return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
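
This new helper gates every call to tg3_tso_bug(): GSO can only rescue a packet if the resulting segments fit in the TX ring, and with a small MTU gso_segs can exceed the ring size outright, which is how the transmit queue could time out. The /3 divisor leaves headroom, since each segment consumes more than one descriptor once its headers and fragments are mapped. Below is a standalone sketch of the arithmetic; TG3_DEF_TX_RING_PENDING mirrors the 511-entry default from tg3.h, and everything else is stubbed for illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define TG3_DEF_TX_RING_PENDING 511    /* default tx ring size in tg3.h */

static bool gso_fits_ring(unsigned int gso_segs, unsigned int tx_pending)
{
        /* Same test as tg3_tso_bug_gso_check(): integer division by 3
         * keeps headroom because each segment needs more than one
         * descriptor (header plus fragments).
         */
        return gso_segs < tx_pending / 3;
}

int main(void)
{
        /* 511 / 3 == 170, so at most 169 segments may take the GSO
         * fallback on a default-sized ring.
         */
        printf("64 segs  -> %s\n", gso_fits_ring(64, TG3_DEF_TX_RING_PENDING) ? "GSO" : "drop");
        printf("300 segs -> %s\n", gso_fits_ring(300, TG3_DEF_TX_RING_PENDING) ? "GSO" : "drop");
        return 0;
}
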
                /* HW/FW can not correctly segment packets that have been
                 * vlan encapsulated.
                 */
                if (skb->protocol == htons(ETH_P_8021Q) ||
-                   skb->protocol == htons(ETH_P_8021AD))
-                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (tg3_tso_bug_gso_check(tnapi, skb))
+                               return tg3_tso_bug(tp, tnapi, txq, skb);
+                       goto drop;
+               }
 
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-                           tg3_flag(tp, TSO_BUG))
-                               return tg3_tso_bug(tp, tnapi, txq, skb);
-
+                           tg3_flag(tp, TSO_BUG)) {
+                               if (tg3_tso_bug_gso_check(tnapi, skb))
+                                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                               goto drop;
+                       }
                        ip_csum = iph->check;
                        ip_tot_len = iph->tot_len;
                        iph->check = 0;
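
Both TSO-bug paths above (vlan-encapsulated packets, and headers longer than 80 bytes on TSO_BUG chips) now share the same shape: the hardware cannot segment the packet itself, so either GSO takes over or the packet is dropped via the drop label (outside this excerpt; it frees the skb and counts it as dropped). Previously tg3_tso_bug() was called unconditionally, so a packet whose segments could never fit the ring was queued but never transmitted. A compact model of the new decision follows; the enum and classify() are illustrative stand-ins, not tg3 identifiers.

#include <stdbool.h>

enum tx_action { TX_HW_TSO, TX_GSO_FALLBACK, TX_DROP };

static enum tx_action classify(bool hits_tso_hw_bug, unsigned int gso_segs,
                               unsigned int tx_pending)
{
        if (!hits_tso_hw_bug)
                return TX_HW_TSO;        /* hardware segments it itself */
        if (gso_segs < tx_pending / 3)
                return TX_GSO_FALLBACK;  /* tg3_tso_bug() */
        return TX_DROP;                  /* new with this patch */
}

int main(void)
{
        /* A small-MTU TSO packet with 300 segments on a 511-entry ring
         * is now dropped instead of wedging the queue.
         */
        return classify(true, 300, 511) == TX_DROP ? 0 : 1;
}
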
        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-               if (mss) {
+               if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
                        /* If it's a TSO packet, do GSO instead of
                         * allocating and copying to a large linear SKB
                         */
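
Note the asymmetry with the two drop paths above: in this would_hit_hwbug branch a failed tg3_tso_bug_gso_check() does not drop the packet; the condition simply falls through to the existing linearization path (tigon3_dma_hwbug_workaround(), whose closing lines open this diff), since a DMA bug, unlike a TSO bug, can still be worked around by copying the data into a linear SKB. A hedged sketch of that ordering follows; dma_hwbug_action() and its result strings are illustrative, not driver code.

#include <stdbool.h>
#include <stdio.h>

static const char *dma_hwbug_action(bool is_tso /* mss != 0 */,
                                    unsigned int gso_segs,
                                    unsigned int tx_pending)
{
        if (is_tso && gso_segs < tx_pending / 3)
                return "resubmit via GSO";    /* tg3_tso_bug() */
        return "linearize and remap";         /* tigon3_dma_hwbug_workaround() */
}

int main(void)
{
        /* A TSO packet too large for the ring is linearized here,
         * not dropped.
         */
        puts(dma_hwbug_action(true, 300, 511));
        return 0;
}
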