@@ ... @@
        unsigned int bytes_compl = 0, pkts_compl = 0;
        unsigned int entry = priv->dirty_tx;
 
-       spin_lock(&priv->tx_lock);
+       netif_tx_lock(priv->dev);
 
        priv->xstats.tx_clean++;
 
@@ ... @@
        netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
 
        if (unlikely(netif_queue_stopped(priv->dev) &&
-                    stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
-               netif_tx_lock(priv->dev);
-               if (netif_queue_stopped(priv->dev) &&
-                   stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
-                       netif_dbg(priv, tx_done, priv->dev,
-                                 "%s: restart transmit\n", __func__);
-                       netif_wake_queue(priv->dev);
-               }
-               netif_tx_unlock(priv->dev);
+           stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
+               netif_dbg(priv, tx_done, priv->dev,
+                         "%s: restart transmit\n", __func__);
+               netif_wake_queue(priv->dev);
        }
 
        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
                stmmac_enable_eee_mode(priv);
                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
        }
-       spin_unlock(&priv->tx_lock);
+       netif_tx_unlock(priv->dev);
 }
 
 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
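The two hunks above are the heart of the change: the driver-private tx_lock becomes the netdev tx lock, and because the core serializes ndo_start_xmit() against netif_tx_lock() (for drivers without NETIF_F_LLTX, which stmmac does not set), the old lock-again-and-re-check wake-up sequence collapses into a single test. A minimal sketch of the resulting pattern; my_priv, my_tx_avail() and MY_TX_WAKE_THRESH are hypothetical stand-ins, not names from this driver:

#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;			/* hypothetical driver state */
};

static unsigned int my_tx_avail(struct my_priv *priv);	/* hypothetical: free descriptor count */
#define MY_TX_WAKE_THRESH	64		/* hypothetical threshold */

/* Hedged sketch of the cleanup pattern the new stmmac_tx_clean() follows. */
static void my_tx_clean(struct my_priv *priv)
{
	netif_tx_lock(priv->dev);

	/* ... reclaim completed descriptors, count freed bytes/packets ... */

	/*
	 * No nested netif_tx_lock() and no re-check needed: the core
	 * serializes the xmit path against this section, so the queue
	 * state cannot change between the test and the wake-up.
	 */
	if (netif_queue_stopped(priv->dev) &&
	    my_tx_avail(priv) > MY_TX_WAKE_THRESH)
		netif_wake_queue(priv->dev);

	netif_tx_unlock(priv->dev);
}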
@@ ... @@
        u8 proto_hdr_len;
        int i;
 
-       spin_lock(&priv->tx_lock);
-
        /* Compute header lengths */
        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 
                                   "%s: Tx Ring full when queue awake\n",
                                   __func__);
                }
-               spin_unlock(&priv->tx_lock);
                return NETDEV_TX_BUSY;
        }
 
@@ ... @@
        priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
                                       STMMAC_CHAN0);
 
-       spin_unlock(&priv->tx_lock);
        return NETDEV_TX_OK;
 
 dma_map_err:
-       spin_unlock(&priv->tx_lock);
        dev_err(priv->device, "Tx dma map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
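The three hunks above strip the private lock from stmmac_tso_xmit() without adding a replacement. The safety argument lives in the core, not the driver: for drivers that do not advertise NETIF_F_LLTX, the stack invokes ndo_start_xmit() with the queue's tx lock held, and netif_tx_lock() synchronizes with exactly that lock, so the xmit path and the cleanup path above already exclude each other. A condensed illustration of that calling convention, simplified from the core's transmit path (HARD_TX_LOCK is internal to net/core/dev.c, not a driver-facing API):

#include <linux/netdevice.h>

/* Simplified illustration only; not the literal __dev_queue_xmit() code. */
static netdev_tx_t core_style_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;
	netdev_tx_t rc = NETDEV_TX_BUSY;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	HARD_TX_LOCK(dev, txq, smp_processor_id());	/* takes txq->_xmit_lock */
	if (!netif_xmit_frozen_or_stopped(txq))
		rc = dev->netdev_ops->ndo_start_xmit(skb, dev);	/* e.g. stmmac_xmit() */
	HARD_TX_UNLOCK(dev, txq);

	return rc;
}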
@@ ... @@
                        return stmmac_tso_xmit(skb, dev);
        }
 
-       spin_lock(&priv->tx_lock);
-
        if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
-               spin_unlock(&priv->tx_lock);
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
                        /* This is a hard error, log it. */
@@ ... @@
                priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
                                               STMMAC_CHAN0);
 
-       spin_unlock(&priv->tx_lock);
        return NETDEV_TX_OK;
 
 dma_map_err:
-       spin_unlock(&priv->tx_lock);
        netdev_err(priv->dev, "Tx DMA map failed\n");
        dev_kfree_skb(skb);
        priv->dev->stats.tx_dropped++;
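A practical payoff of the removals in this function and in stmmac_tso_xmit() above: the dma_map_err exits are now plain cleanup, so an early return can no longer leak a held lock. The shape of the simplified error path, with hypothetical names mirroring the hunks above:

#include <linux/netdevice.h>

static int my_map_frags(struct sk_buff *skb);	/* hypothetical DMA mapping step */

/* Hedged sketch: no driver lock is held across the TX path any more,
 * so the error exit only frees the skb and counts the drop. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_map_frags(skb))
		goto dma_map_err;

	/* ... fill descriptors, advance the tail pointer ... */
	return NETDEV_TX_OK;

dma_map_err:
	/* no spin_unlock() to remember here */
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}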
@@ ... @@
        netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
 
        spin_lock_init(&priv->lock);
-       spin_lock_init(&priv->tx_lock);
 
        ret = register_netdev(ndev);
        if (ret) {
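Finally, with its only initializer removed, the tx_lock member itself is dead. Presumably the full patch also deletes it from the driver's private structure; that header is not part of this excerpt, so the companion change is sketched here as an assumption:

/* Assumed companion change in the driver's private header (not shown
 * in this excerpt): the unused lock is dropped, and priv->lock stays
 * for the remaining non-TX serialization. */
struct stmmac_priv {
	/* ... */
	spinlock_t lock;	/* still used outside the TX path */
	/* spinlock_t tx_lock; -- removed: TX paths rely on netif_tx_lock() */
	/* ... */
};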