return &dev->_tx[index];
 }
 
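+/* Shorthand for netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)):
+ * look up the TX queue @skb has been mapped to on @dev.
+ */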
+static inline
+struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
+                                      const struct sk_buff *skb)
+{
+       return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+}
+
 static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
 
                        continue;
                }
 
-               txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+               txq = skb_get_tx_queue(dev, skb);
 
                local_irq_save(flags);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
 
 {
        struct net_device *odev = pkt_dev->odev;
        struct netdev_queue *txq;
-       u16 queue_map;
        int ret;
 
        /* If device is offline, then don't send */
        if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
                pktgen_stop_device(pkt_dev);
                return;
        }
 
        if (pkt_dev->delay && pkt_dev->last_ok)
                spin(pkt_dev, pkt_dev->next_tx);
 
-       queue_map = skb_get_queue_mapping(pkt_dev->skb);
-       txq = netdev_get_tx_queue(odev, queue_map);
+       txq = skb_get_tx_queue(odev, pkt_dev->skb);
 
        local_bh_disable();
 
 
        netdev_features_t features;
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;
-       u16 queue_map;
 
        if (unlikely(!netif_running(dev) ||
                     !netif_carrier_ok(dev)))
                goto drop;
 
        features = netif_skb_features(skb);
        if (skb_needs_linearize(skb, features) &&
            __skb_linearize(skb))
                goto drop;
 
-       queue_map = skb_get_queue_mapping(skb);
-       txq = netdev_get_tx_queue(dev, queue_map);
+       txq = skb_get_tx_queue(dev, skb);
 
        local_bh_disable();
 
 
 
        if (unlikely(skb)) {
                /* check the reason of requeuing without tx lock first */
-               txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
+               txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
        skb = dequeue_skb(q);
        if (unlikely(!skb))
                return 0;
+
        WARN_ON_ONCE(skb_dst_is_noref(skb));
+
        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
-       txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+       txq = skb_get_tx_queue(dev, skb);
 
        return sch_direct_xmit(skb, q, dev, txq, root_lock);
 }
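
For reference, a minimal sketch of what the conversion means at a call site
(hypothetical function name, not part of the patch; the two lookups resolve
the same queue by definition of the new helper):

#include <linux/netdevice.h>

static void show_txq_lookup(struct net_device *dev, struct sk_buff *skb)
{
        /* Before the patch: two explicit steps. */
        u16 queue_map = skb_get_queue_mapping(skb);
        struct netdev_queue *txq_old = netdev_get_tx_queue(dev, queue_map);

        /* After the patch: a single call through the new helper. */
        struct netdev_queue *txq_new = skb_get_tx_queue(dev, skb);

        /* Equivalent by construction; see the helper body above. */
        WARN_ON_ONCE(txq_old != txq_new);
}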