                skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
 
-               spin_lock(&dev->queue_lock);
+               spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        result = q->enqueue(skb, q);
-               spin_unlock(&dev->queue_lock);
+               spin_unlock(&dev->ingress_lock);
 
        }
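
For orientation, a minimal sketch of the receive-path pattern this hunk
switches to (the function name and default below are illustrative, not
the kernel's): the ingress qdisc pointer is re-tested under
dev->ingress_lock, so a writer holding both tree locks cannot free the
qdisc between the check and the enqueue.

/* Sketch only: enqueue to the ingress qdisc under dev->ingress_lock.
 * A writer tearing the qdisc down takes queue_lock *and* ingress_lock
 * (qdisc_lock_tree()), so it is excluded for this whole section.
 */
static int ingress_enqueue_sketch(struct sk_buff *skb, struct net_device *dev)
{
        struct Qdisc *q;
        int result = TC_ACT_OK;         /* illustrative default */

        spin_lock(&dev->ingress_lock);
        q = dev->qdisc_ingress;
        if (q != NULL)
                result = q->enqueue(skb, q);
        spin_unlock(&dev->ingress_lock);

        return result;
}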
 
 
 static inline
 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
 {
-       spin_lock_bh(&dev->queue_lock);
+       qdisc_lock_tree(dev);
        memset(head->fastmap, 0, sizeof(head->fastmap));
-       spin_unlock_bh(&dev->queue_lock);
+       qdisc_unlock_tree(dev);
 }
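
The tree lock is needed here because route4's fastmap is consulted from
the classify path, and a route4 classifier may hang off either the
egress qdisc (running under dev->queue_lock) or the ingress qdisc
(running under dev->ingress_lock); the reset must exclude both. A
hedged illustration of such a reader (field names and hash are
assumptions for illustration, not the real cls_route code):

/* Hypothetical fastmap reader; runs under whichever datapath lock the
 * parent qdisc holds.  route4_reset_fastmap() above must therefore
 * hold both locks, which qdisc_lock_tree() now guarantees.
 */
static struct route4_filter *fastmap_lookup_sketch(struct route4_head *head,
                                                   u32 id)
{
        int h = id & 0xF;       /* assumed 16-entry fastmap hash */

        if (head->fastmap[h].id == id)
                return head->fastmap[h].filter;
        return NULL;
}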
 
 static inline void
 
 
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
+               sch->stats_lock = &dev->ingress_lock;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
-       } else if (handle == 0) {
-               handle = qdisc_alloc_handle(dev);
-               err = -ENOMEM;
-               if (handle == 0)
-                       goto err_out3;
+       } else {
+               sch->stats_lock = &dev->queue_lock;
+               if (handle == 0) {
+                       handle = qdisc_alloc_handle(dev);
+                       err = -ENOMEM;
+                       if (handle == 0)
+                               goto err_out3;
+               }
        }
 
        sch->handle = handle;
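
Condensed, the rule this hunk adds is: a qdisc's statistics must be
updated under the same lock that serializes its datapath, so ingress
qdiscs get dev->ingress_lock and everything else keeps
dev->queue_lock. As a sketch (the helper is hypothetical; qdisc_create()
open-codes this above):

/* Sketch of the stats_lock choice made in qdisc_create(). */
static void qdisc_pick_stats_lock_sketch(struct Qdisc *sch,
                                         struct net_device *dev, u32 handle)
{
        if (handle == TC_H_INGRESS)
                sch->stats_lock = &dev->ingress_lock;
        else
                sch->stats_lock = &dev->queue_lock;
}
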
                        return err;
                if (q) {
                        qdisc_notify(skb, n, clid, q, NULL);
-                       spin_lock_bh(&dev->queue_lock);
+                       qdisc_lock_tree(dev);
                        qdisc_destroy(q);
-                       spin_unlock_bh(&dev->queue_lock);
+                       qdisc_unlock_tree(dev);
                }
        } else {
                qdisc_notify(skb, n, clid, NULL, q);
                err = qdisc_graft(dev, p, clid, q, &old_q);
                if (err) {
                        if (q) {
-                               spin_lock_bh(&dev->queue_lock);
+                               qdisc_lock_tree(dev);
                                qdisc_destroy(q);
-                               spin_unlock_bh(&dev->queue_lock);
+                               qdisc_unlock_tree(dev);
                        }
                        return err;
                }
                qdisc_notify(skb, n, clid, old_q, q);
                if (old_q) {
-                       spin_lock_bh(&dev->queue_lock);
+                       qdisc_lock_tree(dev);
                        qdisc_destroy(old_q);
-                       spin_unlock_bh(&dev->queue_lock);
+                       qdisc_unlock_tree(dev);
                }
        }
        return 0;
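
Each qdisc_destroy() in this hunk now runs with the whole tree locked,
so neither the egress nor the ingress path can still be inside the
qdisc when it is freed. The recurring pattern, pulled out as a sketch
(the helper is hypothetical; the call sites above open-code it):

/* Sketch of the destroy pattern used at each call site above. */
static void qdisc_destroy_locked_sketch(struct net_device *dev,
                                        struct Qdisc *q)
{
        qdisc_lock_tree(dev);   /* queue_lock + ingress_lock */
        qdisc_destroy(q);
        qdisc_unlock_tree(dev);
}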
 
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
  *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
 {
        spin_lock_bh(&dev->queue_lock);
+       spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+       spin_unlock(&dev->ingress_lock);
        spin_unlock_bh(&dev->queue_lock);
 }
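
Note the lock ordering these helpers establish: dev->queue_lock is
taken first (with BHs disabled by spin_lock_bh()), then
dev->ingress_lock, and they are released in reverse order. Any other
code that ever needs both locks must follow the same order or it can
deadlock against a concurrent qdisc_lock_tree(). Usage sketch:

        /* Exclude both datapaths for a tree-wide update. */
        qdisc_lock_tree(dev);   /* queue_lock, then ingress_lock */
        /* ... modify the qdisc tree / shared classifier state ... */
        qdisc_unlock_tree(dev); /* ingress_lock, then queue_lock */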
 
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
        dev_hold(dev);
-       sch->stats_lock = &dev->queue_lock;
        atomic_set(&sch->refcnt, 1);
 
        return sch;
        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                goto errout;
+       sch->stats_lock = &dev->queue_lock;
        sch->parent = parentid;
 
        if (!ops->init || ops->init(sch, NULL) == 0)
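
With the default gone from qdisc_alloc(), every caller now owns the
stats_lock choice: qdisc_create() picks it from the handle (earlier
hunk), while qdisc_create_dflt(), which only ever builds egress qdiscs,
hard-codes dev->queue_lock here. A sketch of what a hypothetical
ingress-side caller would have to do instead:

        /* Hypothetical ingress-side caller: must pick ingress_lock,
         * mirroring what qdisc_create() does for TC_H_INGRESS above.
         */
        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        sch->stats_lock = &dev->ingress_lock;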
 
                skb->dev ? (*pskb)->dev->name : "(no dev)",
                skb->len);
 
-/*
-revisit later: Use a private since lock dev->queue_lock is also
-used on the egress (might slow things for an iota)
-*/
-
        if (dev->qdisc_ingress) {
-               spin_lock(&dev->queue_lock);
+               spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        fwres = q->enqueue(skb, q);
-               spin_unlock(&dev->queue_lock);
+               spin_unlock(&dev->ingress_lock);
        }
 
        return fwres;
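
Taken together with the first hunk, both receive paths now serialize on
dev->ingress_lock alone, while teardown takes both locks via
qdisc_lock_tree(). A hedged sketch of the writer side this makes safe
(the helper name is hypothetical):

/* Sketch: detaching the ingress qdisc.  Holding both datapath locks
 * guarantees that neither the netfilter-hook path above nor the
 * dev.c ingress path is still inside the qdisc being destroyed.
 */
static void ingress_detach_sketch(struct net_device *dev)
{
        struct Qdisc *q;

        qdisc_lock_tree(dev);
        q = dev->qdisc_ingress;
        dev->qdisc_ingress = NULL;
        if (q)
                qdisc_destroy(q);
        qdisc_unlock_tree(dev);
}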