 #define FLAG_RX_CSUM_ENABLED   (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 #define FLAG_RX_CSUM_ERROR     (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 
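+/* Deferred-restart context: fec_restart() can sleep (napi_disable()),
+ * so the tx watchdog, which runs in softirq context, only records a
+ * timeout here; the actual restart happens later in a work item.
+ */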
+struct fec_enet_delayed_work {
+       struct delayed_work delay_work;
+       bool timeout;
+};
+
 /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors.  The
  * cur_rx and cur_tx point to the currently available buffer.
        /* The ring entries to be free()ed */
        struct bufdesc  *dirty_tx;
 
-       /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
-       spinlock_t hw_lock;
-
        struct  platform_device *pdev;
 
        int     opened;
        int hwts_rx_en;
        int hwts_tx_en;
        struct timer_list time_keep;
-
+       struct fec_enet_delayed_work delay_work;
 };
 
 void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
 
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */
 
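+       /* Quiesce the interface before resetting the controller: detach,
+        * stop the queue so no new transmits race the reset, and hold the
+        * tx lock for the duration of the restart.
+        */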
+       if (netif_running(ndev)) {
+               netif_device_detach(ndev);
+               napi_disable(&fep->napi);
+               netif_stop_queue(ndev);
+               netif_tx_lock_bh(ndev);
+       }
+
        /* Whack a reset.  We should wait for this. */
        writel(1, fep->hwp + FEC_ECNTRL);
        udelay(10);
 
        /* Enable interrupts we wish to service */
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
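+       /* Controller is programmed again: reattach and restart traffic */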
+       if (netif_running(ndev)) {
+               netif_device_attach(ndev);
+               napi_enable(&fep->napi);
+               netif_wake_queue(ndev);
+               netif_tx_unlock_bh(ndev);
+       }
 }
 
 static void
 
        ndev->stats.tx_errors++;
 
-       fec_restart(ndev, fep->full_duplex);
-       netif_wake_queue(ndev);
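+       /* Defer the restart to process context; fec_restart() sleeps and
+        * this handler runs from the softirq watchdog timer.
+        */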
+       fep->delay_work.timeout = true;
+       schedule_delayed_work(&(fep->delay_work.delay_work), 0);
+}
+
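+/* Process-context worker that performs the restart requested by
+ * fec_timeout() above.
+ */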
+static void fec_enet_work(struct work_struct *work)
+{
+       struct fec_enet_private *fep =
+               container_of(work,
+                            struct fec_enet_private,
+                            delay_work.delay_work.work);
+
+       if (fep->delay_work.timeout) {
+               fep->delay_work.timeout = false;
+               fec_restart(fep->netdev, fep->full_duplex);
+               netif_wake_queue(fep->netdev);
+       }
 }
 
 static void
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = fep->phy_dev;
-       unsigned long flags;
-
        int status_change = 0;
 
-       spin_lock_irqsave(&fep->hw_lock, flags);
-
        /* Prevent a state halted on mii error */
        if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
                phy_dev->state = PHY_RESUMING;
-               goto spin_unlock;
+               return;
        }
 
        if (phy_dev->link) {
                }
        }
 
-spin_unlock:
-       spin_unlock_irqrestore(&fep->hw_lock, flags);
-
        if (status_change)
                phy_print_status(phy_dev);
 }
                return -ENOMEM;
 
        memset(cbd_base, 0, PAGE_SIZE);
-       spin_lock_init(&fep->hw_lock);
 
        fep->netdev = ndev;
 
        if (fep->bufdesc_ex && fep->ptp_clock)
                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
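+       /* Set up the deferred-restart worker scheduled by fec_timeout() */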
+       INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
        return 0;
 
 failed_register:
        struct fec_enet_private *fep = netdev_priv(ndev);
        int i;
 
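+       /* Make sure no deferred restart is pending or running before
+        * the device is torn down.
+        */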
+       cancel_delayed_work_sync(&(fep->delay_work.delay_work));
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        del_timer_sync(&fep->time_keep);