}
 }
 
-void lock_rx_qs(struct gfar_private *priv)
-{
-       int i;
-
-       for (i = 0; i < priv->num_rx_queues; i++)
-               spin_lock(&priv->rx_queue[i]->rxlock);
-}
-
 void lock_tx_qs(struct gfar_private *priv)
 {
        int i;
                spin_lock(&priv->tx_queue[i]->txlock);
 }
 
-void unlock_rx_qs(struct gfar_private *priv)
-{
-       int i;
-
-       for (i = 0; i < priv->num_rx_queues; i++)
-               spin_unlock(&priv->rx_queue[i]->rxlock);
-}
-
 void unlock_tx_qs(struct gfar_private *priv)
 {
        int i;
                priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->dev = priv->ndev;
-               spin_lock_init(&(priv->rx_queue[i]->rxlock));
        }
        return 0;
 }
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                if (priv->hwts_rx_en) {
-                       stop_gfar(netdev);
                        priv->hwts_rx_en = 0;
-                       startup_gfar(netdev);
+                       reset_gfar(netdev);
                }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                if (!priv->hwts_rx_en) {
-                       stop_gfar(netdev);
                        priv->hwts_rx_en = 1;
-                       startup_gfar(netdev);
+                       reset_gfar(netdev);
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
                         priv->errata);
 }
 
-static void gfar_mac_reset(struct gfar_private *priv)
+void gfar_mac_reset(struct gfar_private *priv)
 {
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        if (priv->num_tx_queues == 1)
                priv->prio_sched_en = 1;
 
+       set_bit(GFAR_DOWN, &priv->state);
+
        gfar_hw_init(priv);
 
        err = register_netdev(dev);
 
                local_irq_save(flags);
                lock_tx_qs(priv);
-               lock_rx_qs(priv);
 
                gfar_halt_nodisable(priv);
 
 
                gfar_write(&regs->maccfg1, tempval);
 
-               unlock_rx_qs(priv);
                unlock_tx_qs(priv);
                local_irq_restore(flags);
 
         */
        local_irq_save(flags);
        lock_tx_qs(priv);
-       lock_rx_qs(priv);
 
        tempval = gfar_read(&regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
 
        gfar_start(priv);
 
-       unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);
 
 void stop_gfar(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       unsigned long flags;
 
-       phy_stop(priv->phydev);
+       netif_tx_stop_all_queues(dev);
 
+       smp_mb__before_clear_bit();
+       set_bit(GFAR_DOWN, &priv->state);
+       smp_mb__after_clear_bit();
 
-       /* Lock it down */
-       local_irq_save(flags);
-       lock_tx_qs(priv);
-       lock_rx_qs(priv);
+       disable_napi(priv);
 
+       /* disable ints and gracefully shut down Rx/Tx DMA */
        gfar_halt(priv);
 
-       unlock_rx_qs(priv);
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
+       phy_stop(priv->phydev);
 
        free_skb_resources(priv);
 }
 
        gfar_init_tx_rx_base(priv);
 
-       /* Start the controller */
+       smp_mb__before_clear_bit();
+       clear_bit(GFAR_DOWN, &priv->state);
+       smp_mb__after_clear_bit();
+
+       /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
 
        phy_start(priv->phydev);
 
+       enable_napi(priv);
+
+       netif_tx_wake_all_queues(ndev);
+
        return 0;
 }
 
        struct gfar_private *priv = netdev_priv(dev);
        int err;
 
-       enable_napi(priv);
-
        err = init_phy(dev);
-
-       if (err) {
-               disable_napi(priv);
+       if (err)
                return err;
-       }
 
        err = gfar_request_irq(priv);
        if (err)
                return err;
 
        err = startup_gfar(dev);
-       if (err) {
-               disable_napi(priv);
+       if (err)
                return err;
-       }
-
-       netif_tx_start_all_queues(dev);
 
        device_set_wakeup_enable(&dev->dev, priv->wol_en);
 
 {
        struct gfar_private *priv = netdev_priv(dev);
 
-       disable_napi(priv);
-
        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);
 
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;
 
-       netif_tx_stop_all_queues(dev);
-
        gfar_free_irq(priv);
 
        return 0;
                return -EINVAL;
        }
 
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
+
        if (dev->flags & IFF_UP)
                stop_gfar(dev);
 
        if (dev->flags & IFF_UP)
                startup_gfar(dev);
 
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
        return 0;
 }
 
+void reset_gfar(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
+
+       stop_gfar(ndev);
+       startup_gfar(ndev);
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
+}
+
 /* gfar_reset_task gets scheduled when a packet has not been
  * transmitted after a set amount of time.
  * For now, assume that clearing out all the structures, and
 {
        struct gfar_private *priv = container_of(work, struct gfar_private,
                                                 reset_task);
-       struct net_device *dev = priv->ndev;
-
-       if (dev->flags & IFF_UP) {
-               netif_tx_stop_all_queues(dev);
-               stop_gfar(dev);
-               startup_gfar(dev);
-               netif_tx_start_all_queues(dev);
-       }
-
-       netif_tx_schedule_all(dev);
+       reset_gfar(priv->ndev);
 }
 
 static void gfar_timeout(struct net_device *dev)
        }
 
        /* If we freed a buffer, we can restart transmission, if necessary */
-       if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
-               netif_wake_subqueue(dev, tqi);
+       if (tx_queue->num_txbdfree &&
+           netif_tx_queue_stopped(txq) &&
+           !(test_bit(GFAR_DOWN, &priv->state)))
+               netif_wake_subqueue(priv->ndev, tqi);
 
        /* Update dirty indicators */
        tx_queue->skb_dirtytx = skb_dirtytx;
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;
 
-       local_irq_save(flags);
-       lock_tx_qs(priv);
+       if (test_bit(GFAR_RESETTING, &priv->state))
+               return;
 
        if (phydev->link) {
                u32 tempval1 = gfar_read(&regs->maccfg1);
 
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
 }
 
 /* Update the hash table based on the current list of multicast
 
 
 /**
  *     struct gfar_priv_rx_q - per rx queue structure
- *     @rxlock: per queue rx spin lock
  *     @rx_skbuff: skb pointers
  *     @skb_currx: currently use skb pointer
  *     @rx_bd_base: First rx buffer descriptor
  */
 
 struct gfar_priv_rx_q {
-       spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
-       struct  sk_buff ** rx_skbuff;
+       struct  sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
        dma_addr_t rx_bd_dma_base;
        struct  rxbd8 *rx_bd_base;
        struct  rxbd8 *cur_rx;
        GFAR_ERRATA_12          = 0x08, /* a.k.a errata eTSEC49 */
 };
 
+enum gfar_dev_state {
+       GFAR_DOWN = 1,
+       GFAR_RESETTING
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblance)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
        struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
        struct gfar_priv_grp gfargrp[MAXGROUPS];
 
+       unsigned long state;
        u32 device_flags;
 
        unsigned int mode;
        }
 }
 
-void lock_rx_qs(struct gfar_private *priv);
-void lock_tx_qs(struct gfar_private *priv);
-void unlock_rx_qs(struct gfar_private *priv);
-void unlock_tx_qs(struct gfar_private *priv);
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
 void gfar_halt(struct gfar_private *priv);
 void gfar_start(struct gfar_private *priv);
 void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
 
                return -EINVAL;
        }
 
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
+
        if (dev->flags & IFF_UP)
                stop_gfar(dev);
 
                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
 
        /* Rebuild the rings with the new size */
-       if (dev->flags & IFF_UP) {
+       if (dev->flags & IFF_UP)
                err = startup_gfar(dev);
-               netif_tx_wake_all_queues(dev);
-       }
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
        return err;
 }
 
 int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
        netdev_features_t changed = dev->features ^ features;
+       struct gfar_private *priv = netdev_priv(dev);
        int err = 0;
 
        if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                         NETIF_F_RXCSUM)))
                return 0;
 
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
+
        dev->features = features;
 
        if (dev->flags & IFF_UP) {
                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);
                err = startup_gfar(dev);
-               netif_tx_wake_all_queues(dev);
+       } else {
+               gfar_mac_reset(priv);
        }
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
        return err;
 }
 
        if (tab->index > MAX_FILER_IDX - 1)
                return -EBUSY;
 
-       /* Avoid inconsistent filer table to be processed */
-       lock_rx_qs(priv);
-
        /* Fill regular entries */
        for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
             i++)
         */
        gfar_write_filer(priv, i, 0x20, 0x0);
 
-       unlock_rx_qs(priv);
-
        return 0;
 }
 
        struct gfar_private *priv = netdev_priv(dev);
        int ret = 0;
 
+       if (test_bit(GFAR_RESETTING, &priv->state))
+               return -EBUSY;
+
        mutex_lock(&priv->rx_queue_access);
 
        switch (cmd->cmd) {