        dev->stats.tx_packets += pkts_compl;
        dev->stats.tx_bytes += bytes_compl;
 
+       txq = netdev_get_tx_queue(dev, ring->queue);
+       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
-               txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        }
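
The hunk above hoists the txq lookup out of the wake-up check so the reclaimed packet and byte counts can be reported to BQL (Byte Queue Limits) on every reclaim pass, not only when the queue needs waking. A minimal sketch of the same completion-side pattern, with the ring bookkeeping reduced to plain parameters (the function and parameter names are illustrative; only the netdev_*/netif_tx_* helpers are real kernel APIs):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Completion side of BQL: report what was reclaimed, then decide whether
 * the queue has room again for a worst-case (maximally fragmented) frame.
 */
static void example_tx_reclaim_done(struct net_device *dev, unsigned int q,
                                    unsigned int pkts_compl,
                                    unsigned int bytes_compl,
                                    unsigned int free_bds)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

        /* Pairs with netdev_tx_sent_queue() on the transmit path; BQL uses
         * the completed byte count to size the in-flight limit.
         */
        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

        if (free_bds > MAX_SKB_FRAGS + 1 && netif_tx_queue_stopped(txq))
                netif_tx_wake_queue(txq);
}
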
        ring->prod_index += nr_frags + 1;
        ring->prod_index &= DMA_P_INDEX_MASK;
 
+       netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
+
        if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
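
On the transmit side, the bytes charged to BQL come from GENET_CB(skb)->bytes_sent, which is presumably captured from skb->len earlier in the driver's ndo_start_xmit path, before any hardware status block is prepended. A hedged sketch of this sent-side half of the accounting (same includes as the previous sketch; free_bds stands in for the ring's free-descriptor count and is illustrative):

/* Transmit side of BQL: charge the bytes before the doorbell write so the
 * reclaim path's netdev_tx_completed_queue() can never observe them first.
 */
static void example_tx_sent(struct netdev_queue *txq, unsigned int bytes_sent,
                            unsigned int free_bds)
{
        netdev_tx_sent_queue(txq, bytes_sent);

        /* Stop the queue while a worst-case fragmented frame no longer fits;
         * the reclaim path wakes it once descriptors are freed.
         */
        if (free_bds <= MAX_SKB_FRAGS + 1)
                netif_tx_stop_queue(txq);
}
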
 
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
+       struct netdev_queue *txq;
 
        bcmgenet_fini_rx_napi(priv);
        bcmgenet_fini_tx_napi(priv);
                }
        }
 
+       for (i = 0; i < priv->hw_params->tx_queues; i++) {
+               txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+               netdev_tx_reset_queue(txq);
+       }
+
+       txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
+       netdev_tx_reset_queue(txq);
+
        bcmgenet_free_rx_buffers(priv);
        kfree(priv->rx_cbs);
        kfree(priv->tx_cbs);
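
Resetting per-queue BQL state in the teardown path matters because netdev_tx_reset_queue() clears the in-flight byte accounting; without it, a queue torn down with bytes still counted as in flight could remain throttled after the DMA rings are re-initialized. The driver walks its priority rings and then the default ring (DESC_INDEX) because ring index and netdev queue index are mapped through ring->queue. A generic sketch that simply resets every TX queue of a device (the function name is illustrative):

/* Clear BQL state for every TX queue of a device during teardown. */
static void example_reset_tx_bql(struct net_device *dev)
{
        unsigned int q;

        for (q = 0; q < dev->real_num_tx_queues; q++)
                netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
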