#endif
 
 enum netdev_queue_state_t {
-       __QUEUE_STATE_XOFF,
+       __QUEUE_STATE_DRV_XOFF,
+       __QUEUE_STATE_STACK_XOFF,
        __QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)          | \
-                                   (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)            | \
+                             (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF           | \
+                                       (1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
+ * netif_tx_* functions below are used to manipulate this flag.  The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently.  The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits is set in the state).  Drivers should not need to call
+ * the netif_xmit_*stopped functions; they should use only netif_tx_*.
+ */
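
The split is easiest to see from a driver's perspective. Below is a minimal sketch of the intended usage, not part of the patch: the driver owns __QUEUE_STATE_DRV_XOFF through netif_tx_stop_queue()/netif_tx_wake_queue() and never touches the stack's bit. The foo_* names, the foo_ring_space() helper and the FOO_MAX_FRAGS threshold are hypothetical; netif_tx_stop_queue(), netif_tx_wake_queue(), netif_tx_queue_stopped(), netdev_get_tx_queue() and netdev_priv() are the real helpers.

/* Hypothetical driver: stop the queue when the TX ring is nearly full,
 * wake it from the completion path.  Only __QUEUE_STATE_DRV_XOFF is
 * manipulated here; the stack's XOFF bit stays under stack control.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... post skb to the hardware ring ... */

	if (foo_ring_space(priv) < FOO_MAX_FRAGS)
		netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */

	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct foo_priv *priv)
{
	struct netdev_queue *txq = netdev_get_tx_queue(priv->dev, 0);

	/* ... reclaim finished descriptors ... */

	if (netif_tx_queue_stopped(txq) && foo_ring_space(priv) >= FOO_MAX_FRAGS)
		netif_tx_wake_queue(txq);	/* clears the bit, reschedules */
}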
 
 struct netdev_queue {
 /*
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-       if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+       if (!(txq->state & QUEUE_STATE_ANY_XOFF))
                __netif_schedule(txq->qdisc);
 }
 
 
 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-       clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
                return;
        }
 #endif
-       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
 }
 
                pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                return;
        }
-       set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
 
 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-       return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-       return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+       return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+       return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 }
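
The two new predicates differ only in the mask they test. As a quick check of the bit arithmetic, here is a self-contained userspace snippet mirroring the enum and macros above (illustration only, not kernel code):

#include <assert.h>

enum {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))

int main(void)
{
	/* Either XOFF bit alone is enough to report "stopped"... */
	assert((1 << __QUEUE_STATE_DRV_XOFF) & QUEUE_STATE_ANY_XOFF);
	assert((1 << __QUEUE_STATE_STACK_XOFF) & QUEUE_STATE_ANY_XOFF);
	/* ...while FROZEN is seen only by the _or_frozen variant. */
	assert(!((1 << __QUEUE_STATE_FROZEN) & QUEUE_STATE_ANY_XOFF));
	assert((1 << __QUEUE_STATE_FROZEN) & QUEUE_STATE_ANY_XOFF_OR_FROZEN);
	return 0;
}

This is why callers such as sch_direct_xmit() below can use netif_xmit_frozen_or_stopped() as a strict superset of the netif_xmit_stopped() check.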
 
 /**
        if (netpoll_trap())
                return;
 #endif
-       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
 }
 
 
                        return rc;
                }
                txq_trans_update(txq);
-               if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+               if (unlikely(netif_xmit_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
 
 
                        HARD_TX_LOCK(dev, txq, cpu);
 
-                       if (!netif_tx_queue_stopped(txq)) {
+                       if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
                                rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
 
 
                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_frozen_or_stopped(txq) ||
+               if (netif_xmit_frozen_or_stopped(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
-                               if (!netif_tx_queue_stopped(txq)) {
+                               if (!netif_xmit_stopped(txq)) {
                                        status = ops->ndo_start_xmit(skb, dev);
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
 
 
        __netif_tx_lock_bh(txq);
 
-       if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+       if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
                ret = NETDEV_TX_BUSY;
                pkt_dev->last_ok = 0;
                goto unlock;
 
 
                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-               if (!netif_tx_queue_frozen_or_stopped(txq)) {
+               if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
        spin_unlock(root_lock);
 
        HARD_TX_LOCK(dev, txq, smp_processor_id());
-       if (!netif_tx_queue_frozen_or_stopped(txq))
+       if (!netif_xmit_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
 
        HARD_TX_UNLOCK(dev, txq);
                ret = dev_requeue_skb(skb, q);
        }
 
-       if (ret && netif_tx_queue_frozen_or_stopped(txq))
+       if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;
 
        return ret;
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
-                               if (netif_tx_queue_stopped(txq) &&
+                               if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
 
                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
-               if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+               if (!netif_xmit_stopped(
+                   netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
                        qdisc = q->queues[q->curband];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
-               if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+               if (!netif_xmit_stopped(
+                   netdev_get_tx_queue(qdisc_dev(sch), curband))) {
                        qdisc = q->queues[curband];
                        skb = qdisc->ops->peek(qdisc);
                        if (skb)
 
 
                if (slave_txq->qdisc_sleeping != q)
                        continue;
-               if (__netif_subqueue_stopped(slave, subq) ||
+               if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
                    !netif_running(slave)) {
                        busy = 1;
                        continue;
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
 
-                               if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+                               if (!netif_xmit_frozen_or_stopped(slave_txq) &&
                                    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                        txq_trans_update(slave_txq);
                                        __netif_tx_unlock(slave_txq);
                                }
                                __netif_tx_unlock(slave_txq);
                        }
-                       if (netif_queue_stopped(dev))
+                       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
                                busy = 1;
                        break;
                case 1: