struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
-       unsigned int bds_compl;
        unsigned int c_index;
+       unsigned int txbds_ready;
+       unsigned int txbds_processed = 0;
 
        /* Compute how many buffers are transmitted since last xmit call */
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-       txq = netdev_get_tx_queue(dev, ring->queue);
-
-       last_c_index = ring->c_index;
-       num_tx_bds = ring->size;
-
-       c_index &= (num_tx_bds - 1);
+       c_index &= DMA_C_INDEX_MASK;
 
-       if (c_index >= last_c_index)
-               last_tx_cn = c_index - last_c_index;
+       if (likely(c_index >= ring->c_index))
+               txbds_ready = c_index - ring->c_index;
        else
-               last_tx_cn = num_tx_bds - last_c_index + c_index;
+               txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 
        netif_dbg(priv, tx_done, dev,
-                 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-                 __func__, ring->index,
-                 c_index, last_tx_cn, last_c_index);
+                 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+                 __func__, ring->index, ring->c_index, c_index, txbds_ready);
 
        /* Reclaim transmitted buffers */
-       while (last_tx_cn-- > 0) {
-               tx_cb_ptr = ring->cbs + last_c_index;
-               bds_compl = 0;
+       while (txbds_processed < txbds_ready) {
+               tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
                        pkts_compl++;
-                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+                       dev->stats.tx_packets++;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
                                         tx_cb_ptr->skb->len,
                                         DMA_TO_DEVICE);
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
-               dev->stats.tx_packets++;
-               ring->free_bds += bds_compl;
 
-               last_c_index++;
-               last_c_index &= (num_tx_bds - 1);
+               txbds_processed++;
+               if (likely(ring->clean_ptr < ring->end_ptr))
+                       ring->clean_ptr++;
+               else
+                       ring->clean_ptr = ring->cb_ptr;
        }
 
+       ring->free_bds += txbds_processed;
+       ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        }
 
-       ring->c_index = c_index;
-
        return pkts_compl;
 }
 
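Taken together, the hunk above swaps the old ring-size-modulo bookkeeping for arithmetic on the raw hardware consumer index plus a software clean pointer that walks the control-block array. The following is a standalone sketch, not driver code: demo_ring, ready_bds(), reclaim() and the 0xFFFF mask are assumptions standing in for DMA_C_INDEX_MASK and the ring fields used above, so the two wraparounds (consumer index past the mask, clean pointer past end_ptr) can be seen in isolation.

/*
 * Standalone userspace sketch of the reclaim arithmetic above.
 * Assumes a 16-bit hardware consumer index and a ring described by
 * cb_ptr/end_ptr indices into a flat control-block array.
 */
#include <stdio.h>

#define C_INDEX_MASK    0xFFFF  /* stands in for DMA_C_INDEX_MASK */

struct demo_ring {
        unsigned int c_index;   /* last consumer index seen by software */
        unsigned int clean_ptr; /* next control block to reclaim */
        unsigned int cb_ptr;    /* first control block of this ring */
        unsigned int end_ptr;   /* last control block of this ring */
        unsigned int free_bds;  /* free descriptors */
};

/* How many descriptors the hardware completed since the last call. */
static unsigned int ready_bds(struct demo_ring *ring, unsigned int hw_c_index)
{
        hw_c_index &= C_INDEX_MASK;
        if (hw_c_index >= ring->c_index)
                return hw_c_index - ring->c_index;
        /* consumer index wrapped past the 16-bit mask */
        return (C_INDEX_MASK + 1) - ring->c_index + hw_c_index;
}

/* Walk clean_ptr over the completed descriptors, wrapping at end_ptr. */
static unsigned int reclaim(struct demo_ring *ring, unsigned int hw_c_index)
{
        unsigned int txbds_ready = ready_bds(ring, hw_c_index);
        unsigned int txbds_processed = 0;

        while (txbds_processed < txbds_ready) {
                /* the real driver unmaps/frees tx_cbs[ring->clean_ptr] here */
                txbds_processed++;
                if (ring->clean_ptr < ring->end_ptr)
                        ring->clean_ptr++;
                else
                        ring->clean_ptr = ring->cb_ptr;
        }

        ring->free_bds += txbds_processed;
        ring->c_index = (ring->c_index + txbds_processed) & C_INDEX_MASK;
        return txbds_processed;
}

int main(void)
{
        struct demo_ring ring = {
                .c_index = 0xFFFE, .clean_ptr = 30, .cb_ptr = 0,
                .end_ptr = 31, .free_bds = 28,
        };
        unsigned int done;

        /* hardware index wrapped from 0xFFFE to 0x0002: 4 descriptors done */
        done = reclaim(&ring, 0x0002);
        printf("reclaimed %u, clean_ptr now %u, c_index now 0x%x\n",
               done, ring.clean_ptr, ring.c_index);
        return 0;
}

Built as ordinary userspace C, the example reclaims four descriptors across both wraparounds: the consumer index goes from 0xFFFE to 0x0002, and clean_ptr walks 30, 31, 0, 1 before stopping at 2. The remaining hunk, in the Tx ring initialization path, gives the new clean_ptr field its starting value: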
        }
        ring->cbs = priv->tx_cbs + start_ptr;
        ring->size = size;
+       ring->clean_ptr = start_ptr;
        ring->c_index = 0;
        ring->free_bds = size;
        ring->write_ptr = start_ptr;
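
In terms of the sketch above, the added ring->clean_ptr = start_ptr means the clean pointer begins exactly where write_ptr begins and then chases it as completions arrive. A matching init for the demo structure might look like this (the cb_ptr and end_ptr values are assumptions for the illustration, not taken from the hunk):

/* Sketch only: start clean_ptr where transmission starts; cb_ptr is the
 * wrap target the reclaim loop falls back to after end_ptr.
 */
static void demo_ring_init(struct demo_ring *ring, unsigned int start_ptr,
                           unsigned int size)
{
        ring->c_index = 0;
        ring->free_bds = size;
        ring->clean_ptr = start_ptr;            /* mirrors the added line */
        ring->cb_ptr = start_ptr;               /* assumed wrap target */
        ring->end_ptr = start_ptr + size - 1;   /* assumed last valid index */
}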