        ugeth_vdbg("%s: IN", __func__);
 
+       netdev_sent_queue(dev, skb->len);
        spin_lock_irqsave(&ugeth->lock, flags);
 
        dev->stats.tx_bytes += skb->len;
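
This first hunk is the producer side of the Byte Queue Limits (BQL) accounting: netdev_sent_queue() is called before the buffer descriptor is set up and handed to the hardware, so the completion path can never report more completed bytes than were queued. A minimal sketch of that side of the pattern, assuming a single TX queue; foo_start_xmit is an illustrative name, only the netdev_* helpers are the real API from <linux/netdevice.h>:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* BQL: account the bytes before the hardware can complete them */
        netdev_sent_queue(dev, skb->len);

        /* ... build the buffer descriptor and kick the transmitter ... */

        return NETDEV_TX_OK;
}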
 {
        /* Start from the next BD that should be filled */
        struct ucc_geth_private *ugeth = netdev_priv(dev);
+       unsigned int bytes_sent = 0;
+       int howmany = 0;
        u8 __iomem *bd;         /* BD pointer */
        u32 bd_status;
 
                skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
                if (!skb)
                        break;
-
+               howmany++;
+               bytes_sent += skb->len;
                dev->stats.tx_packets++;
 
                dev_consume_skb_any(skb);
                bd_status = in_be32((u32 __iomem *)bd);
        }
        ugeth->confBd[txQ] = bd;
+       netdev_completed_queue(dev, howmany, bytes_sent);
        return 0;
 }
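
The completion side must retire exactly what the xmit side accounted, which is why the cleanup loop above counts packets and bytes for every freed skb and reports them once, after the descriptor walk, with netdev_completed_queue(). A sketch of that shape, with the ring walk reduced to a comment and foo_clean_tx_ring purely illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_clean_tx_ring(struct net_device *dev)
{
        unsigned int bytes_sent = 0;
        int howmany = 0;

        /* for each descriptor the hardware has finished with:
         *         howmany++;
         *         bytes_sent += skb->len;
         *         dev_consume_skb_any(skb);
         */

        /* a single call per cleanup pass keeps the BQL bookkeeping cheap */
        netdev_completed_queue(dev, howmany, bytes_sent);
}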
 
 
        phy_start(ugeth->phydev);
        napi_enable(&ugeth->napi);
+       netdev_reset_queue(dev);
        netif_start_queue(dev);
 
        device_set_wakeup_capable(&dev->dev,
        free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
 
        netif_stop_queue(dev);
+       netdev_reset_queue(dev);
 
        return 0;
 }
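
The BQL counters are not reset by the hardware, so they have to be cleared whenever the TX ring is (re)initialised or torn down; stale accounting from a previous down/up cycle would otherwise leave the stack thinking bytes are still in flight and throttle the queue. That is what the netdev_reset_queue() calls next to netif_start_queue()/netif_stop_queue() above are for. A sketch of where the reset sits, with foo_open/foo_close as illustrative names only:

#include <linux/netdevice.h>

static int foo_open(struct net_device *dev)
{
        /* ... allocate and initialise the TX ring ... */

        netdev_reset_queue(dev);        /* drop any stale BQL accounting */
        netif_start_queue(dev);
        return 0;
}

static int foo_close(struct net_device *dev)
{
        netif_stop_queue(dev);
        netdev_reset_queue(dev);        /* accounting is meaningless once the ring is torn down */

        /* ... free the TX ring ... */
        return 0;
}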