Context: nominally process, but don't sleep inside an rwlock
 
 dev->hard_start_xmit:
-       Synchronization: dev->xmit_lock spinlock.
+       Synchronization: netif_tx_lock spinlock.
        When the driver sets NETIF_F_LLTX in dev->features this will be
-       called without holding xmit_lock. In this case the driver 
+       called without holding netif_tx_lock. In this case the driver
        has to lock by itself when needed. It is recommended to use a try lock
        for this and return -1 when the spin lock fails. 
        The locking there should also properly protect against 
          Only valid when NETIF_F_LLTX is set.
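
        For reference, a minimal sketch (not part of this patch) of how a
        driver that sets NETIF_F_LLTX might follow the try-lock
        recommendation above.  The foo_* names, priv->tx_lock and
        priv->tx_free are made up for illustration:

        static int foo_hard_start_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
        {
                struct foo_priv *priv = netdev_priv(dev);

                /* NETIF_F_LLTX: the core did not take netif_tx_lock for us */
                if (!spin_trylock(&priv->tx_lock))
                        return NETDEV_TX_LOCKED;        /* -1, core retries */

                if (priv->tx_free == 0) {
                        netif_stop_queue(dev);
                        spin_unlock(&priv->tx_lock);
                        return NETDEV_TX_BUSY;
                }

                foo_hw_queue_frame(priv, skb);  /* hand the frame to hardware */
                priv->tx_free--;

                spin_unlock(&priv->tx_lock);
                return NETDEV_TX_OK;
        }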
 
 dev->tx_timeout:
-       Synchronization: dev->xmit_lock spinlock.
+       Synchronization: netif_tx_lock spinlock.
        Context: BHs disabled
        Notes: netif_queue_stopped() is guaranteed true
 
 dev->set_multicast_list:
-       Synchronization: dev->xmit_lock spinlock.
+       Synchronization: netif_tx_lock spinlock.
        Context: BHs disabled
 
 dev->poll:
 
 
        ipoib_mcast_stop_thread(dev, 0);
 
-       spin_lock_irqsave(&dev->xmit_lock, flags);
+       local_irq_save(flags);
+       netif_tx_lock(dev);
        spin_lock(&priv->lock);
 
        /*
        }
 
        spin_unlock(&priv->lock);
-       spin_unlock_irqrestore(&dev->xmit_lock, flags);
+       netif_tx_unlock(dev);
+       local_irq_restore(flags);
 
        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
 
 
        dvb_net_feed_stop(dev);
        priv->rx_mode = RX_MODE_UNI;
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
 
        if (dev->flags & IFF_PROMISC) {
                dprintk("%s: promiscuous mode\n", dev->name);
                }
        }
 
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        dvb_net_feed_start(dev);
 }
 
 
        return 1;
 }
 
-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  * from set_multicast.
  */
 static void
 }
 #endif
 
-/* Called with dev->xmit_lock.
+/* Called with netif_tx_lock.
  * hard_start_xmit is pseudo-lockless - a lock is only required when
  * the tx queue is full. This way, we get the benefit of lockless
  * operations most of the time without the complexities to handle
 
         */
        bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 
-       /* don't acquire bond device's xmit_lock when 
+       /* don't acquire bond device's netif_tx_lock when
         * transmitting */
        bond_dev->features |= NETIF_F_LLTX;
 
 
  * critical parts:
  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  *     by the arch code for interrupts.
- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *     needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  */
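
A sketch of the nesting this describes, with hypothetical foo_* names:
hard_start_xmit runs with netif_tx_lock already held by the core, and only
the final submission takes the driver's private lock, which is shared with
the interrupt handler:

        static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct foo_private *np = netdev_priv(dev);

                /* netif_tx_lock held: descriptor setup needs no extra lock */
                foo_fill_descriptors(np, skb);

                spin_lock_irq(&np->lock);       /* np->lock is also taken from
                                                 * the irq handler, hence _irq */
                foo_kick_hardware(np);
                spin_unlock_irq(&np->lock);

                return NETDEV_TX_OK;
        }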
 
 /* in dev: base, irq */
 
 /*
  * nv_start_xmit: dev->hard_start_xmit function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 
 /*
  * nv_tx_timeout: dev->tx_timeout function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_tx_timeout(struct net_device *dev)
 {
                 * Changing the MTU is a rare event, it shouldn't matter.
                 */
                nv_disable_irq(dev);
-               spin_lock_bh(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                spin_lock(&np->lock);
                /* stop engines */
                nv_stop_rx(dev);
                nv_start_rx(dev);
                nv_start_tx(dev);
                spin_unlock(&np->lock);
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
                nv_enable_irq(dev);
        }
        return 0;
        memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
 
        if (netif_running(dev)) {
-               spin_lock_bh(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                spin_lock_irq(&np->lock);
 
                /* stop rx engine */
                /* restart rx engine */
                nv_start_rx(dev);
                spin_unlock_irq(&np->lock);
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
        } else {
                nv_copy_mac_to_hw(dev);
        }
 
 /*
  * nv_set_multicast: dev->set_multicast function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_set_multicast(struct net_device *dev)
 {
 
 {
        struct sockaddr_ax25 *sa = addr;
 
-       spin_lock_irq(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-       spin_unlock_irq(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 
        return 0;
 }
                        break;
                }
 
-               spin_lock_irq(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
-               spin_unlock_irq(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
 
                err = 0;
                break;
 
 {
        struct sockaddr_ax25 *sa = addr;
 
-       spin_lock_irq(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-       spin_unlock_irq(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 
        return 0;
 }
                        break;
                }
 
-               spin_lock_irq(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
-               spin_unlock_irq(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
 
                err = 0;
                break;
 
        dp->st_task_enter++;
        if ((skb = skb_peek(&dp->tq)) == NULL) {
                dp->st_txq_refl_try++;
-               if (spin_trylock(&_dev->xmit_lock)) {
+               if (netif_tx_trylock(_dev)) {
                        dp->st_rxq_enter++;
                        while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                                skb_queue_tail(&dp->tq, skb);
                                dp->st_rx2tx_tran++;
                        }
-                       spin_unlock(&_dev->xmit_lock);
+                       netif_tx_unlock(_dev);
                } else {
                        /* reschedule */
                        dp->st_rxq_notenter++;
                }
        }
 
-       if (spin_trylock(&_dev->xmit_lock)) {
+       if (netif_tx_trylock(_dev)) {
                dp->st_rxq_check++;
                if ((skb = skb_peek(&dp->rq)) == NULL) {
                        dp->tasklet_pending = 0;
                                netif_wake_queue(_dev);
                } else {
                        dp->st_rxq_rsch++;
-                       spin_unlock(&_dev->xmit_lock);
+                       netif_tx_unlock(_dev);
                        goto resched;
                }
-               spin_unlock(&_dev->xmit_lock);
+               netif_tx_unlock(_dev);
        } else {
 resched:
                dp->tasklet_pending = 1;
 
                            ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
                                break;
                        udelay(100);
-                       /* must not sleep here - we are called under xmit_lock! */
+                       /* must not sleep here - called under netif_tx_lock! */
                }
        }
 
 
 The rx process only runs in the interrupt handler. Access from outside
 the interrupt handler is only permitted after disable_irq().
 
-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
 is set, then access is permitted under spin_lock_irq(&np->lock).
 
 Thus configuration functions that want to access everything must call
        disable_irq(dev->irq);
-       spin_lock_bh(dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        spin_lock_irq(&np->lock);
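
For clarity, the same sequence with the matching release order, as a sketch
only (foo_reconfigure() and the foo_private structure are hypothetical):

        static void foo_reconfigure(struct net_device *dev)
        {
                struct foo_private *np = netdev_priv(dev);

                disable_irq(dev->irq);          /* no interrupt handler runs   */
                netif_tx_lock_bh(dev);          /* no hard_start_xmit runs     */
                spin_lock_irq(&np->lock);       /* no other configuration path */

                /* ... reprogram the hardware here ... */

                spin_unlock_irq(&np->lock);
                netif_tx_unlock_bh(dev);
                enable_irq(dev->irq);
        }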
 
 IV. Notes
 
  * - get_stats:
  *     spin_lock_irq(np->lock), doesn't touch hw if not present
  * - hard_start_xmit:
- *     netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
+ *     synchronize_irq + netif_tx_disable;
  * - tx_timeout:
- *     netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ *     netif_device_detach + netif_tx_disable;
  * - set_multicast_list
- *     netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ *     netif_device_detach + netif_tx_disable;
  * - interrupt handler
  *     doesn't touch hw if not present, synchronize_irq waits for
  *     running instances of the interrupt handler.
                netif_device_detach(dev);
                update_csr6(dev, 0);
                iowrite32(0, ioaddr + IntrEnable);
-               netif_stop_queue(dev);
                spin_unlock_irq(&np->lock);
 
-               spin_unlock_wait(&dev->xmit_lock);
                synchronize_irq(dev->irq);
+               netif_tx_disable(dev);
        
                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
 
 
        /* Set promiscuity / multicast*/
        priv->promiscuous = 0;
        priv->mc_count = 0;
-       __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+
+       /* FIXME: what about netif_tx_lock */
+       __orinoco_set_multicast_list(dev);
 
        return 0;
 }
 
  * One part is mostly used on xmit path (device)
  */
        /* hard_start_xmit synchronizer */
-       spinlock_t              xmit_lock ____cacheline_aligned_in_smp;
+       spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
        /* cpu id of processor entered to hard_start_xmit or -1,
           if nobody entered there.
         */
        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+       spin_lock(&dev->_xmit_lock);
+       dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+       spin_lock_bh(&dev->_xmit_lock);
+       dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+       int ok = spin_trylock(&dev->_xmit_lock);
+
+       /* record the owner only when the lock was actually acquired */
+       if (ok)
+               dev->xmit_lock_owner = smp_processor_id();
+       return ok;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+       dev->xmit_lock_owner = -1;
+       spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+       dev->xmit_lock_owner = -1;
+       spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        netif_stop_queue(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
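
For illustration, one way a driver might use the helpers above to quiesce the
transmit path around a reset (foo_reset() and foo_reset_hw() are hypothetical):

        static void foo_reset(struct net_device *dev)
        {
                netif_tx_disable(dev);  /* queue stopped under netif_tx_lock_bh;
                                         * for a non-LLTX driver no
                                         * hard_start_xmit runs past this point */
                foo_reset_hw(dev);      /* reinitialize the hardware */
                netif_wake_queue(dev);  /* let the stack transmit again */
        }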
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 
                printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
                return;
        }
-       spin_lock_bh(&entry->neigh->dev->xmit_lock);    /* block clip_start_xmit() */
+       netif_tx_lock_bh(entry->neigh->dev);    /* block clip_start_xmit() */
        entry->neigh->used = jiffies;
        for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
                if (*walk == clip_vcc) {
        printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
               "0x%p)\n", entry, clip_vcc);
       out:
-       spin_unlock_bh(&entry->neigh->dev->xmit_lock);
+       netif_tx_unlock_bh(entry->neigh->dev);
 }
 
 /* The neighbour entry n->lock is held. */
 
 
 #define HARD_TX_LOCK(dev, cpu) {                       \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               spin_lock(&dev->xmit_lock);             \
-               dev->xmit_lock_owner = cpu;             \
+               netif_tx_lock(dev);                     \
        }                                               \
 }
 
 #define HARD_TX_UNLOCK(dev) {                          \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               dev->xmit_lock_owner = -1;              \
-               spin_unlock(&dev->xmit_lock);           \
+               netif_tx_unlock(dev);                   \
        }                                               \
 }
 
        /* The device has no queue. Common case for software devices:
           loopback, all the sorts of tunnels...
 
-          Really, it is unlikely that xmit_lock protection is necessary here.
-          (f.e. loopback and IP tunnels are clean ignoring statistics
+          Really, it is unlikely that netif_tx_lock protection is necessary
+          here.  (f.e. loopback and IP tunnels are clean ignoring statistics
           counters.)
           However, it is possible, that they rely on protection
           made by us here.
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 
        spin_lock_init(&dev->queue_lock);
-       spin_lock_init(&dev->xmit_lock);
+       spin_lock_init(&dev->_xmit_lock);
        dev->xmit_lock_owner = -1;
 #ifdef CONFIG_NET_CLS_ACT
        spin_lock_init(&dev->ingress_lock);
 
  *     Device mc lists are changed by bh at least if IPv6 is enabled,
  *     so that it must be bh protected.
  *
- *     We block accesses to device mc filters with dev->xmit_lock.
+ *     We block accesses to device mc filters with netif_tx_lock.
  */
 
 /*
 
 void dev_mc_upload(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        __dev_mc_upload(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 /*
        int err = 0;
        struct dev_mc_list *dmi, **dmip;
 
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
 
        for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
                /*
                         */
                        __dev_mc_upload(dev);
                        
-                       spin_unlock_bh(&dev->xmit_lock);
+                       netif_tx_unlock_bh(dev);
                        return 0;
                }
        }
        err = -ENOENT;
 done:
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        return err;
 }
 
 
        dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
                if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
                    dmi->dmi_addrlen == alen) {
        }
 
        if ((dmi = dmi1) == NULL) {
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
                return -ENOMEM;
        }
        memcpy(dmi->dmi_addr, addr, alen);
 
        __dev_mc_upload(dev);
        
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        return 0;
 
 done:
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        kfree(dmi1);
        return err;
 }
 
 void dev_mc_discard(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        
        while (dev->mc_list != NULL) {
                struct dev_mc_list *tmp = dev->mc_list;
        }
        dev->mc_count = 0;
 
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 #ifdef CONFIG_PROC_FS
        struct dev_mc_list *m;
        struct net_device *dev = v;
 
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        for (m = dev->mc_list; m; m = m->next) {
                int i;
 
 
                seq_putc(seq, '\n');
        }
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        return 0;
 }
 
 
 
        do {
                npinfo->tries--;
-               spin_lock(&np->dev->xmit_lock);
-               np->dev->xmit_lock_owner = smp_processor_id();
+               netif_tx_lock(np->dev);
 
                /*
                 * network drivers do not expect to be called if the queue is
                 * stopped.
                 */
                if (netif_queue_stopped(np->dev)) {
-                       np->dev->xmit_lock_owner = -1;
-                       spin_unlock(&np->dev->xmit_lock);
+                       netif_tx_unlock(np->dev);
                        netpoll_poll(np);
                        udelay(50);
                        continue;
                }
 
                status = np->dev->hard_start_xmit(skb, np->dev);
-               np->dev->xmit_lock_owner = -1;
-               spin_unlock(&np->dev->xmit_lock);
+               netif_tx_unlock(np->dev);
 
                /* success */
                if(!status) {
 
                }
        }
 
-       spin_lock_bh(&odev->xmit_lock);
+       netif_tx_lock_bh(odev);
        if (!netif_queue_stopped(odev)) {
 
                atomic_inc(&(pkt_dev->skb->users));
                pkt_dev->next_tx_ns = 0;
        }
 
-       spin_unlock_bh(&odev->xmit_lock);
+       netif_tx_unlock_bh(odev);
 
        /* If pkt_dev->count is zero, then run forever */
        if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
 
    dev->queue_lock serializes queue accesses for this device
    AND dev->qdisc pointer itself.
 
-   dev->xmit_lock serializes accesses to device driver.
+   netif_tx_lock serializes accesses to device driver.
 
-   dev->queue_lock and dev->xmit_lock are mutually exclusive,
+   dev->queue_lock and netif_tx_lock are mutually exclusive,
    if one is grabbed, another must be free.
  */
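
As a sketch of that discipline: foo_qdisc_restart() below is a simplified,
hypothetical stand-in for the real qdisc_restart() that follows, with LLTX,
collision handling and hard_start_xmit error handling left out:

        static int foo_qdisc_restart(struct net_device *dev)
        {
                struct Qdisc *q = dev->qdisc;
                struct sk_buff *skb;

                /* entered with dev->queue_lock held */
                if ((skb = q->dequeue(q)) == NULL)
                        return 0;

                if (!netif_tx_trylock(dev)) {
                        /* driver lock busy: put the packet back rather than
                         * spin on netif_tx_lock while holding queue_lock */
                        q->ops->requeue(skb, q);
                        return 1;
                }

                spin_unlock(&dev->queue_lock);  /* both locks are never held
                                                 * around the driver call     */
                dev->hard_start_xmit(skb, dev);
                netif_tx_unlock(dev);

                spin_lock(&dev->queue_lock);    /* re-take before touching
                                                 * dev->qdisc again           */
                return -1;
        }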
 
                 * will be requeued.
                 */
                if (!nolock) {
-                       if (!spin_trylock(&dev->xmit_lock)) {
+                       if (!netif_tx_trylock(dev)) {
                        collision:
                                /* So, someone grabbed the driver. */
                                
                                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                                goto requeue;
                        }
-                       /* Remember that the driver is grabbed by us. */
-                       dev->xmit_lock_owner = smp_processor_id();
                }
                
                {
                                ret = dev->hard_start_xmit(skb, dev);
                                if (ret == NETDEV_TX_OK) { 
                                        if (!nolock) {
-                                               dev->xmit_lock_owner = -1;
-                                               spin_unlock(&dev->xmit_lock);
+                                               netif_tx_unlock(dev);
                                        }
                                        spin_lock(&dev->queue_lock);
                                        return -1;
                        /* NETDEV_TX_BUSY - we need to requeue */
                        /* Release the driver */
                        if (!nolock) { 
-                               dev->xmit_lock_owner = -1;
-                               spin_unlock(&dev->xmit_lock);
+                               netif_tx_unlock(dev);
                        } 
                        spin_lock(&dev->queue_lock);
                        q = dev->qdisc;
 {
        struct net_device *dev = (struct net_device *)arg;
 
-       spin_lock(&dev->xmit_lock);
+       netif_tx_lock(dev);
        if (dev->qdisc != &noop_qdisc) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                                dev_hold(dev);
                }
        }
-       spin_unlock(&dev->xmit_lock);
+       netif_tx_unlock(dev);
 
        dev_put(dev);
 }
 
 static void dev_watchdog_up(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        __netdev_watchdog_up(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 static void dev_watchdog_down(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 void netif_carrier_on(struct net_device *dev)
        while (test_bit(__LINK_STATE_SCHED, &dev->state))
                yield();
 
-       spin_unlock_wait(&dev->xmit_lock);
+       spin_unlock_wait(&dev->_xmit_lock);
 }
 
 void dev_init_scheduler(struct net_device *dev)
 
 
                switch (teql_resolve(skb, skb_res, slave)) {
                case 0:
-                       if (spin_trylock(&slave->xmit_lock)) {
-                               slave->xmit_lock_owner = smp_processor_id();
+                       if (netif_tx_trylock(slave)) {
                                if (!netif_queue_stopped(slave) &&
                                    slave->hard_start_xmit(skb, slave) == 0) {
-                                       slave->xmit_lock_owner = -1;
-                                       spin_unlock(&slave->xmit_lock);
+                                       netif_tx_unlock(slave);
                                        master->slaves = NEXT_SLAVE(q);
                                        netif_wake_queue(dev);
                                        master->stats.tx_packets++;
                                        master->stats.tx_bytes += len;
                                        return 0;
                                }
-                               slave->xmit_lock_owner = -1;
-                               spin_unlock(&slave->xmit_lock);
+                               netif_tx_unlock(slave);
                        }
                        if (netif_queue_stopped(dev))
                                busy = 1;