int rc;
 
        qdisc_calculate_pkt_len(skb, q);
+
+       if (q->flags & TCQ_F_NOLOCK) {
+               if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+                       __qdisc_drop(skb, &to_free);
+                       rc = NET_XMIT_DROP;
+               } else {
+                       rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+                       __qdisc_run(q);
+               }
+
+               if (unlikely(to_free))
+                       kfree_skb_list(to_free);
+               return rc;
+       }
+
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
         * This permits qdisc->running owner to get the lock more
         * often and dequeue packets faster.
         */
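
The fast path added above never takes the qdisc root lock: when TCQ_F_NOLOCK is set, the qdisc either drops immediately if it is being deactivated, or enqueues and drains in the same call, then frees whatever the enqueue handed back on the to_free list. A minimal user-space sketch of that decision follows; my_qdisc, MY_STATE_DEACTIVATED and friends are invented names, not kernel API, and the to_free bookkeeping is omitted:

#include <stdatomic.h>
#include <stdio.h>

#define MY_STATE_DEACTIVATED 0x1u

enum { MY_XMIT_SUCCESS = 0, MY_XMIT_DROP = 1 };

struct my_qdisc {
        atomic_uint state;      /* models q->state; a bit test replaces the lock */
        unsigned int qlen;      /* models the backlog */
};

/* Models the TCQ_F_NOLOCK branch: drop if the qdisc is going away,
 * otherwise enqueue and immediately drain on this CPU, which is the
 * q->enqueue() + __qdisc_run() pairing in the hunk above. */
static int my_xmit(struct my_qdisc *q)
{
        if (atomic_load(&q->state) & MY_STATE_DEACTIVATED)
                return MY_XMIT_DROP;            /* __qdisc_drop() analogue */

        q->qlen++;                              /* q->enqueue() analogue */
        while (q->qlen)                         /* __qdisc_run() analogue */
                q->qlen--;
        return MY_XMIT_SUCCESS;
}

int main(void)
{
        struct my_qdisc q = { .qlen = 0 };

        atomic_init(&q.state, 0);
        printf("active: rc=%d\n", my_xmit(&q));
        atomic_fetch_or(&q.state, MY_STATE_DEACTIVATED);
        printf("deactivated: rc=%d\n", my_xmit(&q));
        return 0;
}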
 
                while (head) {
                        struct Qdisc *q = head;
-                       spinlock_t *root_lock;
+                       spinlock_t *root_lock = NULL;
 
                        head = head->next_sched;
 
-                       root_lock = qdisc_lock(q);
-                       spin_lock(root_lock);
+                       if (!(q->flags & TCQ_F_NOLOCK)) {
+                               root_lock = qdisc_lock(q);
+                               spin_lock(root_lock);
+                       }
                        /* We need to make sure head->next_sched is read
                         * before clearing __QDISC_STATE_SCHED
                         */
                        smp_mb__before_atomic();
                        clear_bit(__QDISC_STATE_SCHED, &q->state);
                        qdisc_run(q);
-                       spin_unlock(root_lock);
+                       if (root_lock)
+                               spin_unlock(root_lock);
                }
        }
 }
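
Two things matter in this hunk: the root lock becomes optional (NULL for NOLOCK qdiscs, so every lock site turns into a pointer test), and head->next_sched must be read before __QDISC_STATE_SCHED is cleared, since clearing that bit is what allows the qdisc to be rescheduled onto another CPU's list. A user-space sketch of the same loop, with invented my_queue/drain_all names and a seq_cst atomic store standing in for the kernel's smp_mb__before_atomic() + clear_bit() pairing:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct my_queue {
        int nolock;                  /* TCQ_F_NOLOCK analogue */
        atomic_int scheduled;        /* __QDISC_STATE_SCHED analogue */
        pthread_mutex_t lock;        /* qdisc_lock(q) analogue */
        struct my_queue *next;       /* next_sched analogue */
};

static void my_run(struct my_queue *q)
{
        (void)q;                     /* draining one queue is elided */
}

/* Analogue of the net_tx_action() loop above: take the root lock only
 * for queues that still rely on it, and read 'next' before clearing
 * the scheduled bit (the seq_cst store keeps that order). */
static void drain_all(struct my_queue *head)
{
        while (head) {
                struct my_queue *q = head;
                pthread_mutex_t *root_lock = NULL;

                head = head->next;

                if (!q->nolock) {
                        root_lock = &q->lock;
                        pthread_mutex_lock(root_lock);
                }
                atomic_store(&q->scheduled, 0);
                my_run(q);
                if (root_lock)
                        pthread_mutex_unlock(root_lock);
        }
}

int main(void)
{
        struct my_queue a = { .nolock = 1 }, b = { .nolock = 0 };

        pthread_mutex_init(&a.lock, NULL);
        pthread_mutex_init(&b.lock, NULL);
        atomic_init(&a.scheduled, 1);
        atomic_init(&b.scheduled, 1);
        a.next = &b;
        b.next = NULL;
        drain_all(&a);
        printf("a.scheduled=%d b.scheduled=%d\n",
               atomic_load(&a.scheduled), atomic_load(&b.scheduled));
        return 0;
}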
 
        int ret = NETDEV_TX_BUSY;
 
        /* And release qdisc */
-       spin_unlock(root_lock);
+       if (root_lock)
+               spin_unlock(root_lock);
 
        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
 
                HARD_TX_UNLOCK(dev, txq);
        } else {
-               spin_lock(root_lock);
+               if (root_lock)
+                       spin_lock(root_lock);
                return qdisc_qlen(q);
        }
-       spin_lock(root_lock);
+
+       if (root_lock)
+               spin_lock(root_lock);
 
        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
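
sch_direct_xmit() has always dropped the qdisc root lock around the actual driver transmit so other CPUs can keep enqueuing while the NIC is busy; the change here only makes the release and the re-acquire conditional on root_lock being non-NULL. A sketch of that drop-the-lock-around-I/O shape, with invented names (direct_xmit, hw_lock) standing in for sch_direct_xmit() and HARD_TX_LOCK():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER; /* HARD_TX_LOCK analogue */

/* Release the (optional) root lock around the driver transmit, retake
 * it before reporting queue state. root_lock == NULL is the
 * TCQ_F_NOLOCK case: both steps simply vanish. */
static int direct_xmit(pthread_mutex_t *root_lock, int *qlen)
{
        if (root_lock)
                pthread_mutex_unlock(root_lock); /* "And release qdisc" */

        pthread_mutex_lock(&hw_lock);
        (*qlen)--;                               /* driver sends one packet */
        pthread_mutex_unlock(&hw_lock);

        if (root_lock)
                pthread_mutex_lock(root_lock);   /* retake before touching q */
        return *qlen;                            /* qdisc_qlen() analogue */
}

int main(void)
{
        pthread_mutex_t root = PTHREAD_MUTEX_INITIALIZER;
        int qlen = 2;

        pthread_mutex_lock(&root);               /* caller holds the root lock */
        printf("locked: qlen=%d\n", direct_xmit(&root, &qlen));
        pthread_mutex_unlock(&root);

        printf("nolock: qlen=%d\n", direct_xmit(NULL, &qlen));
        return 0;
}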
  */
 static inline int qdisc_restart(struct Qdisc *q, int *packets)
 {
+       spinlock_t *root_lock = NULL;
        struct netdev_queue *txq;
        struct net_device *dev;
-       spinlock_t *root_lock;
        struct sk_buff *skb;
        bool validate;
 
        /* Dequeue packet */
        skb = dequeue_skb(q, &validate, packets);
        if (unlikely(!skb))
                return 0;
 
-       root_lock = qdisc_lock(q);
+       if (!(q->flags & TCQ_F_NOLOCK))
+               root_lock = qdisc_lock(q);
+
        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);
 
 
                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
-               root_lock = qdisc_lock(q);
 
-               spin_lock_bh(root_lock);
+               if (q->flags & TCQ_F_NOLOCK) {
+                       val = test_bit(__QDISC_STATE_SCHED, &q->state);
+               } else {
+                       root_lock = qdisc_lock(q);
+                       spin_lock_bh(root_lock);
 
-               val = (qdisc_is_running(q) ||
-                      test_bit(__QDISC_STATE_SCHED, &q->state));
+                       val = (qdisc_is_running(q) ||
+                              test_bit(__QDISC_STATE_SCHED, &q->state));
 
-               spin_unlock_bh(root_lock);
+                       spin_unlock_bh(root_lock);
+               }
 
                if (val)
                        return true;
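
For the deactivate-time busy check, a NOLOCK qdisc only needs the SCHED bit: there is no root lock to take, and its enqueue path already rechecks __QDISC_STATE_DEACTIVATED itself (first hunk above). A locked qdisc is still sampled under its root lock, where the qdisc_is_running() test is meaningful. A sketch of that split, again with invented names and plain atomics in place of the kernel bit ops:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct my_qdisc {
        int nolock;               /* TCQ_F_NOLOCK analogue */
        atomic_int scheduled;     /* __QDISC_STATE_SCHED analogue */
        atomic_int running;       /* qdisc_is_running() analogue */
        pthread_mutex_t lock;     /* root lock */
};

/* Analogue of the busy check above: a lockless queue is "busy" iff it
 * is still scheduled; a locked queue is sampled under its root lock. */
static bool is_busy(struct my_qdisc *q)
{
        bool val;

        if (q->nolock)
                return atomic_load(&q->scheduled);

        pthread_mutex_lock(&q->lock);
        val = atomic_load(&q->running) || atomic_load(&q->scheduled);
        pthread_mutex_unlock(&q->lock);
        return val;
}

int main(void)
{
        struct my_qdisc q = { .nolock = 1, .lock = PTHREAD_MUTEX_INITIALIZER };

        atomic_init(&q.scheduled, 1);
        atomic_init(&q.running, 0);
        printf("busy=%d\n", is_busy(&q));
        atomic_store(&q.scheduled, 0);
        printf("busy=%d\n", is_busy(&q));
        return 0;
}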