static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&tp->indirect_lock, flags);
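+               /* Nothing touches indirect_lock from hard interrupt
+                * context any more, so disabling BHs is sufficient.
+                */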
+               spin_lock_bh(&tp->indirect_lock);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-               spin_unlock_irqrestore(&tp->indirect_lock, flags);
+               spin_unlock_bh(&tp->indirect_lock);
        } else {
                writel(val, tp->regs + off);
                if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
 {
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&tp->indirect_lock, flags);
+               spin_lock_bh(&tp->indirect_lock);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-               spin_unlock_irqrestore(&tp->indirect_lock, flags);
+               spin_unlock_bh(&tp->indirect_lock);
        } else {
                void __iomem *dest = tp->regs + off;
                writel(val, dest);
 
 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&tp->indirect_lock, flags);
+       spin_lock_bh(&tp->indirect_lock);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-       spin_unlock_irqrestore(&tp->indirect_lock, flags);
+       spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&tp->indirect_lock, flags);
+       spin_lock_bh(&tp->indirect_lock);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-       spin_unlock_irqrestore(&tp->indirect_lock, flags);
+       spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_disable_ints(struct tg3 *tp)
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     (tp->last_tag << 24));
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
-
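+       /* Interrupts are fully enabled again; clear any quiesce flag
+        * left over from tg3_irq_quiesce() so the ISRs resume work.
+        */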
+       tp->irq_sync = 0;
        tg3_cond_int(tp);
 }
 
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
-       tg3_cond_int(tp);
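+       /* Mark the status block updated so that tg3_enable_ints() (via
+        * tg3_cond_int()) forces an interrupt and any events that
+        * arrived while the NIC was stopped get processed.
+        */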
+       tp->hw_status->status |= SD_STATUS_UPDATED;
+       tg3_enable_ints(tp);
 }
 
 static void tg3_switch_clocks(struct tg3 *tp)
                        sw_idx = NEXT_TX(sw_idx);
                }
 
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb(skb);
        }
 
        tp->tx_cons = sw_idx;
 {
        struct tg3 *tp = netdev_priv(netdev);
        struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned long flags;
        int done;
 
-       spin_lock_irqsave(&tp->lock, flags);
-
        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
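+                       /* tg3_setup_phy() needs tp->lock; dev->poll()
+                        * already runs with BHs disabled, so a plain
+                        * spin_lock() suffices.
+                        */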
+                       spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
+                       spin_unlock(&tp->lock);
                }
        }
 
                spin_unlock(&tp->tx_lock);
        }
 
-       spin_unlock_irqrestore(&tp->lock, flags);
-
        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with dev->poll()
        /* if no more work, tell net stack and NIC we're done */
        done = !tg3_has_work(tp);
        if (done) {
-               spin_lock_irqsave(&tp->lock, flags);
-               __netif_rx_complete(netdev);
+               spin_lock(&tp->lock);
+               netif_rx_complete(netdev);
                tg3_restart_ints(tp);
-               spin_unlock_irqrestore(&tp->lock, flags);
+               spin_unlock(&tp->lock);
        }
 
        return (done ? 0 : 1);
 }
 
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+       BUG_ON(tp->irq_sync);
+
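+       /* Set the flag the ISRs check via tg3_irq_sync(); smp_mb()
+        * makes the store visible to other CPUs before we wait.
+        */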
+       tp->irq_sync = 1;
+       smp_mb();
+
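+       /* Wait for any handler invocation already in flight to finish. */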
+       synchronize_irq(tp->pdev->irq);
+}
+
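+/* Checked by the interrupt handlers: non-zero means a quiesce is in
+ * progress and the handler must do nothing beyond ACKing the chip.
+ */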
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+       return tp->irq_sync;
+}
+
+/* Fully shut down all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handler must be quiesced
+ * as well.  Most of the time this is not necessary, except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+       if (irq_sync)
+               tg3_irq_quiesce(tp);
+       spin_lock_bh(&tp->lock);
+       spin_lock(&tp->tx_lock);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+       spin_unlock(&tp->tx_lock);
+       spin_unlock_bh(&tp->lock);
+}
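+
+/* Illustrative usage (a sketch mirroring the reset path below):
+ *
+ *     tg3_full_lock(tp, 1);
+ *     tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ *     tg3_full_unlock(tp);
+ */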
+
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox. PCI ordering rules
  * guarantee that MSI will arrive after the status block.
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tp->lock, flags);
 
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        tp->last_tag = sblk->status_tag;
        rmb();
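+       /* If a quiesce is in progress, the ACK above is all this
+        * handler may do (see tg3_irq_quiesce()).
+        */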
+       if (tg3_irq_sync(tp))
+               goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp)))
                netif_rx_schedule(dev);         /* schedule NAPI poll */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             tp->last_tag << 24);
        }
-
-       spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
        return IRQ_RETVAL(1);
 }
 
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned long flags;
        unsigned int handled = 1;
 
-       spin_lock_irqsave(&tp->lock, flags);
-
        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
                 */
                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                             0x00000001);
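+               /* Bail out, with the interrupt ACKed, if a quiesce is
+                * in progress.
+                */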
+               if (tg3_irq_sync(tp))
+                       goto out;
                sblk->status &= ~SD_STATUS_UPDATED;
                if (likely(tg3_has_work(tp)))
                        netif_rx_schedule(dev);         /* schedule NAPI poll */
        } else {        /* shared interrupt */
                handled = 0;
        }
-
-       spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
        return IRQ_RETVAL(handled);
 }
 
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
-       unsigned long flags;
        unsigned int handled = 1;
 
-       spin_lock_irqsave(&tp->lock, flags);
-
        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
                             0x00000001);
                tp->last_tag = sblk->status_tag;
                rmb();
+               if (tg3_irq_sync(tp))
+                       goto out;
                sblk->status &= ~SD_STATUS_UPDATED;
                if (likely(tg3_has_work(tp)))
                        netif_rx_schedule(dev);         /* schedule NAPI poll */
        } else {        /* shared interrupt */
                handled = 0;
        }
-
-       spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
        return IRQ_RETVAL(handled);
 }
 
 
        tg3_netif_stop(tp);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
 
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
        tg3_netif_start(tp);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);
        unsigned int i;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;
-       unsigned long flags;
 
        len = skb_headlen(skb);
 
        /* No BH disabling for tx_lock here.  We are running in BH disabled
         * context and TX reclaim runs via tp->poll inside of a software
-        * interrupt.  Rejoice!
-        *
-        * Actually, things are not so simple.  If we are to take a hw
-        * IRQ here, we can deadlock, consider:
-        *
-        *       CPU1           CPU2
-        *   tg3_start_xmit
-        *   take tp->tx_lock
-        *                      tg3_timer
-        *                      take tp->lock
-        *   tg3_interrupt
-        *   spin on tp->lock
-        *                      spin on tp->tx_lock
-        *
-        * So we really do need to disable interrupts when taking
-        * tx_lock here.
+        * interrupt.  Furthermore, IRQ processing runs lockless so we have
+        * no IRQ context deadlocks to worry about either.  Rejoice!
         */
-       local_irq_save(flags);
-       if (!spin_trylock(&tp->tx_lock)) { 
-               local_irq_restore(flags);
+       if (!spin_trylock(&tp->tx_lock))
                return NETDEV_TX_LOCKED; 
-       } 
 
        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
-               spin_unlock_irqrestore(&tp->tx_lock, flags);
+               spin_unlock(&tp->tx_lock);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                return NETDEV_TX_BUSY;
 
 out_unlock:
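+       /* mmiowb() keeps MMIO writes done under tx_lock ordered ahead
+        * of MMIO issued by the next holder of the lock.
+        */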
        mmiowb();
-       spin_unlock_irqrestore(&tp->tx_lock, flags);
+       spin_unlock(&tp->tx_lock);
 
        dev->trans_start = jiffies;
 
        }
 
        tg3_netif_stop(tp);
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+
+       tg3_full_lock(tp, 1);
 
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
 
        tg3_netif_start(tp);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        return 0;
 }
 
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-       spin_lock_irq(&tp->lock);
+       spin_lock_bh(&tp->lock);
        __tg3_set_mac_addr(tp);
-       spin_unlock_irq(&tp->lock);
+       spin_unlock_bh(&tp->lock);
 
        return 0;
 }
 static void tg3_timer(unsigned long __opaque)
 {
        struct tg3 *tp = (struct tg3 *) __opaque;
-       unsigned long flags;
 
-       spin_lock_irqsave(&tp->lock, flags);
-       spin_lock(&tp->tx_lock);
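+       /* Timer callbacks already run in BH context, so a plain
+        * spin_lock() is enough; tx_lock is no longer taken here.
+        */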
+       spin_lock(&tp->lock);
 
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
 
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-                       spin_unlock(&tp->tx_lock);
-                       spin_unlock_irqrestore(&tp->lock, flags);
+                       spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
                tp->asf_counter = tp->asf_multiplier;
        }
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irqrestore(&tp->lock, flags);
+       spin_unlock(&tp->lock);
 
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
 
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        if (err)
                free_irq(tp->pdev->irq, dev);
        struct tg3 *tp = netdev_priv(dev);
        int err;
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
                return err;
        }
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        err = tg3_init_hw(tp);
        if (err) {
                tp->timer.function = tg3_timer;
        }
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        if (err) {
                free_irq(tp->pdev->irq, dev);
                err = tg3_test_msi(tp);
 
                if (err) {
-                       spin_lock_irq(&tp->lock);
-                       spin_lock(&tp->tx_lock);
+                       tg3_full_lock(tp, 0);
 
                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);
 
-                       spin_unlock(&tp->tx_lock);
-                       spin_unlock_irq(&tp->lock);
+                       tg3_full_unlock(tp);
 
                        return err;
                }
        }
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        netif_start_queue(dev);
 
 
        del_timer_sync(&tp->timer);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
 #if 0
        tg3_dump_state(tp);
 #endif
                  TG3_FLAG_GOT_SERDES_FLOWCTL);
        netif_carrier_off(tp->dev);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
-               unsigned long flags;
                u32 val;
 
-               spin_lock_irqsave(&tp->lock, flags);
+               spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, 0x1e, &val)) {
                        tg3_writephy(tp, 0x1e, val | 0x8000);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
-               spin_unlock_irqrestore(&tp->lock, flags);
+               spin_unlock_bh(&tp->lock);
 
                tp->phy_crc_errors += val;
 
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
        __tg3_set_rx_mode(dev);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 }
 
 #define TG3_REGDUMP_LEN                (32 * 1024)
 
        memset(p, 0, TG3_REGDUMP_LEN);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
 #define __GET_REG32(reg)       (*(p)++ = tr32(reg))
 #define GET_REG32_LOOP(base,len)               \
 #undef GET_REG32_LOOP
 #undef GET_REG32_1
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 }
 
 static int tg3_get_eeprom_len(struct net_device *dev)
                        return -EINVAL;
        }
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
        if (netif_running(dev))
                tg3_setup_phy(tp, 1);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
   
        return 0;
 }
            !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
                return -EINVAL;
   
-       spin_lock_irq(&tp->lock);
+       spin_lock_bh(&tp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
        else
                tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-       spin_unlock_irq(&tp->lock);
+       spin_unlock_bh(&tp->lock);
   
        return 0;
 }
        if (!netif_running(dev))
                return -EAGAIN;
 
-       spin_lock_irq(&tp->lock);
+       spin_lock_bh(&tp->lock);
        r = -EINVAL;
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
                r = 0;
        }
-       spin_unlock_irq(&tp->lock);
+       spin_unlock_bh(&tp->lock);
   
        return r;
 }
        if (netif_running(dev))
                tg3_netif_stop(tp);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
   
        tp->rx_pending = ering->rx_pending;
 
                tg3_netif_start(tp);
        }
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
   
        return 0;
 }
        if (netif_running(dev))
                tg3_netif_stop(tp);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
+
        if (epause->autoneg)
                tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        else
                tg3_init_hw(tp);
                tg3_netif_start(tp);
        }
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+
+       tg3_full_unlock(tp);
   
        return 0;
 }
                return 0;
        }
   
-       spin_lock_irq(&tp->lock);
+       spin_lock_bh(&tp->lock);
        if (data)
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
-       spin_unlock_irq(&tp->lock);
+       spin_unlock_bh(&tp->lock);
   
        return 0;
 }
                if (netif_running(dev))
                        tg3_netif_stop(tp);
 
-               spin_lock_irq(&tp->lock);
-               spin_lock(&tp->tx_lock);
+               tg3_full_lock(tp, 1);
 
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                tg3_nvram_lock(tp);
                        data[4] = 1;
                }
 
-               spin_unlock(&tp->tx_lock);
-               spin_unlock_irq(&tp->lock);
+               tg3_full_unlock(tp);
+
                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }
-               spin_lock_irq(&tp->lock);
-               spin_lock(&tp->tx_lock);
+
+               tg3_full_lock(tp, 0);
 
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_init_hw(tp);
                        tg3_netif_start(tp);
                }
-               spin_unlock(&tp->tx_lock);
-               spin_unlock_irq(&tp->lock);
+
+               tg3_full_unlock(tp);
        }
 }
 
                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               spin_lock_irq(&tp->lock);
+               spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
-               spin_unlock_irq(&tp->lock);
+               spin_unlock_bh(&tp->lock);
 
                data->val_out = mii_regval;
 
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
 
-               spin_lock_irq(&tp->lock);
+               spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
-               spin_unlock_irq(&tp->lock);
+               spin_unlock_bh(&tp->lock);
 
                return err;
 
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        tp->vlgrp = grp;
 
        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 }
 
 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
        if (tp->vlgrp)
                tp->vlgrp->vlan_devices[vid] = NULL;
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 }
 #endif
 
 
        del_timer_sync(&tp->timer);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        netif_device_detach(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
-               spin_lock_irq(&tp->lock);
-               spin_lock(&tp->tx_lock);
+               tg3_full_lock(tp, 0);
 
                tg3_init_hw(tp);
 
                netif_device_attach(dev);
                tg3_netif_start(tp);
 
-               spin_unlock(&tp->tx_lock);
-               spin_unlock_irq(&tp->lock);
+               tg3_full_unlock(tp);
        }
 
        return err;
 
        netif_device_attach(dev);
 
-       spin_lock_irq(&tp->lock);
-       spin_lock(&tp->tx_lock);
+       tg3_full_lock(tp, 0);
 
        tg3_init_hw(tp);
 
 
        tg3_netif_start(tp);
 
-       spin_unlock(&tp->tx_lock);
-       spin_unlock_irq(&tp->lock);
+       tg3_full_unlock(tp);
 
        return 0;
 }