@@ ... @@ static int igb_disable_sriov(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       unsigned long flags;
 
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
@@ ... @@ static int igb_disable_sriov(struct pci_dev *pdev)
                        pci_disable_sriov(pdev);
                        msleep(500);
                }
-
+               spin_lock_irqsave(&adapter->vfs_lock, flags);
                kfree(adapter->vf_mac_list);
                adapter->vf_mac_list = NULL;
                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                adapter->vfs_allocated_count = 0;
+               spin_unlock_irqrestore(&adapter->vfs_lock, flags);
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                wrfl();
                msleep(100);
@@ ... @@ static void igb_remove(struct pci_dev *pdev)
        igb_release_hw_control(adapter);
 
 #ifdef CONFIG_PCI_IOV
+       rtnl_lock();
        igb_disable_sriov(pdev);
+       rtnl_unlock();
 #endif
 
        unregister_netdev(netdev);
 
@@ ... @@ static int igb_sw_init(struct igb_adapter *adapter)
        spin_lock_init(&adapter->nfc_lock);
        spin_lock_init(&adapter->stats64_lock);
+
+       /* init spinlock to avoid concurrency of VF resources */
+       spin_lock_init(&adapter->vfs_lock);
 #ifdef CONFIG_PCI_IOV
        switch (hw->mac.type) {
        case e1000_82576:
@@ ... @@
 static void igb_msg_task(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
+       unsigned long flags;
        u32 vf;
 
+       spin_lock_irqsave(&adapter->vfs_lock, flags);
        for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
                /* process any reset requests */
                if (!igb_check_for_rst(hw, vf))
                        igb_vf_reset_event(adapter, vf);

                /* process any messages pending */
                if (!igb_check_for_msg(hw, vf))
                        igb_rcv_msg_from_vf(adapter, vf);

                /* process any acks */
                if (!igb_check_for_ack(hw, vf))
                        igb_rcv_ack_from_vf(adapter, vf);
        }
+       spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 }
 
 /**
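
The pattern the patch applies can be sketched outside the driver: the teardown
path and the interrupt-context consumer serialize on one spinlock, taken with
the _irqsave variant because igb_msg_task() is reached from interrupt context
(e.g. igb_msix_other()), while rtnl_lock() in igb_remove() additionally
serializes teardown against ndo callbacks such as igb_ndo_get_vf_config().
Below is a minimal sketch, not code from the patch; demo_adapter,
demo_teardown(), demo_irq_consumer(), and handle_vf_message() are hypothetical
names, and only the locking calls mirror what the patch does:

        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct vf_data_storage;                 /* opaque here; defined in igb.h */

        struct demo_adapter {                   /* hypothetical stand-in for igb_adapter */
                spinlock_t vfs_lock;            /* guards the two fields below */
                unsigned int vfs_allocated_count;
                struct vf_data_storage *vf_data;
        };

        static void handle_vf_message(struct demo_adapter *ad, unsigned int vf)
        {
                /* hypothetical per-VF work; stands in for the igb_rcv_* calls */
        }

        /* Process context (device teardown): free VF state under the lock so a
         * concurrent consumer sees either the old state or count == 0, never a
         * freed pointer paired with a stale nonzero count.
         */
        static void demo_teardown(struct demo_adapter *ad)
        {
                unsigned long flags;

                spin_lock_irqsave(&ad->vfs_lock, flags);
                kfree(ad->vf_data);
                ad->vf_data = NULL;
                ad->vfs_allocated_count = 0;
                spin_unlock_irqrestore(&ad->vfs_lock, flags);
        }

        /* Interrupt context (mailbox service, as in igb_msg_task()): the bound
         * check and every use of vf_data happen atomically w.r.t. teardown.
         */
        static void demo_irq_consumer(struct demo_adapter *ad)
        {
                unsigned long flags;
                unsigned int vf;

                spin_lock_irqsave(&ad->vfs_lock, flags);
                for (vf = 0; vf < ad->vfs_allocated_count; vf++)
                        handle_vf_message(ad, vf);
                spin_unlock_irqrestore(&ad->vfs_lock, flags);
        }

Without the lock, demo_irq_consumer() could pass the bound check, lose the CPU
to demo_teardown(), and then dereference freed vf_data; holding vfs_lock across
both sides makes that interleaving impossible.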