return true;
 }
 
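+/* Packet length as charged to qdisc byte counters and backlog; kept
+ * behind a single accessor so the definition can be changed in one place.
+ */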
+static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+{
+       return skb->len;
+}
+
 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        return sch->enqueue(skb, sch);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                        struct sk_buff_head *list)
 {
        __skb_queue_tail(list, skb);
-       sch->qstats.backlog += skb->len;
-       sch->bstats.bytes += skb->len;
+       sch->qstats.backlog += qdisc_pkt_len(skb);
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
 
        return NET_XMIT_SUCCESS;
 }
 
 static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
                                                    struct sk_buff_head *list)
 {
        struct sk_buff *skb = __skb_dequeue(list);
 
        if (likely(skb != NULL))
-               sch->qstats.backlog -= skb->len;
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
 
        return skb;
 }
 
 static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
                                                    struct sk_buff_head *list)
 {
        struct sk_buff *skb = __skb_dequeue_tail(list);
 
        if (likely(skb != NULL))
-               sch->qstats.backlog -= skb->len;
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
 
        return skb;
 }
 
 static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
                                   struct sk_buff_head *list)
 {
        __skb_queue_head(list, skb);
-       sch->qstats.backlog += skb->len;
+       sch->qstats.backlog += qdisc_pkt_len(skb);
        sch->qstats.requeues++;
 
        return NET_XMIT_SUCCESS;
 }
 
 static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
                                               struct sk_buff_head *list)
 {
        struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
 
        if (likely(skb != NULL)) {
-               unsigned int len = skb->len;
+               unsigned int len = qdisc_pkt_len(skb);
                kfree_skb(skb);
                return len;
        }
 
 #else
        action = gact->tcf_action;
 #endif
-       gact->tcf_bstats.bytes += skb->len;
+       gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
        gact->tcf_bstats.packets++;
        if (action == TC_ACT_SHOT)
                gact->tcf_qstats.drops++;
 
        spin_lock(&ipt->tcf_lock);
 
        ipt->tcf_tm.lastuse = jiffies;
-       ipt->tcf_bstats.bytes += skb->len;
+       ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
        ipt->tcf_bstats.packets++;
 
        /* yes, we have to worry about both in and out dev
 
                if (skb2 != NULL)
                        kfree_skb(skb2);
                m->tcf_qstats.overlimits++;
-               m->tcf_bstats.bytes += skb->len;
+               m->tcf_bstats.bytes += qdisc_pkt_len(skb);
                m->tcf_bstats.packets++;
                spin_unlock(&m->tcf_lock);
                /* should we be asking for packet to be dropped?
                goto bad_mirred;
        }
 
-       m->tcf_bstats.bytes += skb2->len;
+       m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
        m->tcf_bstats.packets++;
        if (!(at & AT_EGRESS))
                if (m->tcfm_ok_push)
 
        egress = p->flags & TCA_NAT_FLAG_EGRESS;
        action = p->tcf_action;
 
-       p->tcf_bstats.bytes += skb->len;
+       p->tcf_bstats.bytes += qdisc_pkt_len(skb);
        p->tcf_bstats.packets++;
 
        spin_unlock(&p->tcf_lock);
 
 bad:
        p->tcf_qstats.overlimits++;
 done:
-       p->tcf_bstats.bytes += skb->len;
+       p->tcf_bstats.bytes += qdisc_pkt_len(skb);
        p->tcf_bstats.packets++;
        spin_unlock(&p->tcf_lock);
        return p->tcf_action;
 
 
        spin_lock(&police->tcf_lock);
 
-       police->tcf_bstats.bytes += skb->len;
+       police->tcf_bstats.bytes += qdisc_pkt_len(skb);
        police->tcf_bstats.packets++;
 
        if (police->tcfp_ewma_rate &&
                return police->tcf_action;
        }
 
-       if (skb->len <= police->tcfp_mtu) {
+       if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
                if (police->tcfp_R_tab == NULL) {
                        spin_unlock(&police->tcf_lock);
                        return police->tcfp_result;
                        ptoks = toks + police->tcfp_ptoks;
                        if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
                                ptoks = (long)L2T_P(police, police->tcfp_mtu);
-                       ptoks -= L2T_P(police, skb->len);
+                       ptoks -= L2T_P(police, qdisc_pkt_len(skb));
                }
                toks += police->tcfp_toks;
                if (toks > (long)police->tcfp_burst)
                        toks = police->tcfp_burst;
-               toks -= L2T(police, skb->len);
+               toks -= L2T(police, qdisc_pkt_len(skb));
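+               /* The packet conforms only if both the byte bucket and
+                * (if configured) the peak-rate bucket stay non-negative.
+                */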
                if ((toks|ptoks) >= 0) {
                        police->tcfp_t_c = now;
                        police->tcfp_toks = toks;
 
 
        spin_lock(&d->tcf_lock);
        d->tcf_tm.lastuse = jiffies;
-       d->tcf_bstats.bytes += skb->len;
+       d->tcf_bstats.bytes += qdisc_pkt_len(skb);
        d->tcf_bstats.packets++;
 
        /* print policy string followed by _ then packet count
 
                        flow->qstats.drops++;
                return ret;
        }
-       sch->bstats.bytes += skb->len;
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
-       flow->bstats.bytes += skb->len;
+       flow->bstats.bytes += qdisc_pkt_len(skb);
        flow->bstats.packets++;
        /*
         * Okay, this may seem weird. We pretend we've dropped the packet if
 
 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       int len = skb->len;
        int uninitialized_var(ret);
        struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                sch->bstats.packets++;
-               sch->bstats.bytes+=len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                cbq_mark_toplevel(q, cl);
                if (!cl->next_alive)
                        cbq_activate_class(cl);
 #ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
-       int len = skb->len;
        struct Qdisc *sch = child->__parent;
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = q->rx_class;
                if (qdisc_enqueue(skb, cl->q) == 0) {
                        sch->q.qlen++;
                        sch->bstats.packets++;
-                       sch->bstats.bytes+=len;
+                       sch->bstats.bytes += qdisc_pkt_len(skb);
                        if (!cl->next_alive)
                                cbq_activate_class(cl);
                        return 0;
                        if (skb == NULL)
                                goto skip_class;
 
-                       cl->deficit -= skb->len;
+                       cl->deficit -= qdisc_pkt_len(skb);
                        q->tx_class = cl;
                        q->tx_borrowed = borrow;
                        if (borrow != cl) {
                                borrow->xstats.borrows++;
                                cl->xstats.borrows++;
 #else
-                               borrow->xstats.borrows += skb->len;
-                               cl->xstats.borrows += skb->len;
+                               borrow->xstats.borrows += qdisc_pkt_len(skb);
+                               cl->xstats.borrows += qdisc_pkt_len(skb);
 #endif
                        }
-                       q->tx_len = skb->len;
+                       q->tx_len = qdisc_pkt_len(skb);
 
                        if (cl->deficit <= 0) {
                                q->active[prio] = cl;
 
                return err;
        }
 
-       sch->bstats.bytes += skb->len;
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
        sch->q.qlen++;
 
 
 {
        struct fifo_sched_data *q = qdisc_priv(sch);
 
-       if (likely(sch->qstats.backlog + skb->len <= q->limit))
+       if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        return qdisc_reshape_fail(skb, sch);
 
        }
 
        q->packetsin++;
-       q->bytesin += skb->len;
+       q->bytesin += qdisc_pkt_len(skb);
 
        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);
                        break;
        }
 
-       if (q->backlog + skb->len <= q->limit) {
-               q->backlog += skb->len;
+       if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+               q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }
 
        } else {
                if (red_is_idling(&q->parms))
                        red_end_of_idle_period(&q->parms);
-               q->backlog += skb->len;
+               q->backlog += qdisc_pkt_len(skb);
        }
 
        return qdisc_requeue(skb, sch);
                                       "VQ 0x%x after dequeue, screwing up "
                                       "backlog.\n", tc_index_to_dp(skb));
                } else {
-                       q->backlog -= skb->len;
+                       q->backlog -= qdisc_pkt_len(skb);
 
                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
 
        skb = qdisc_dequeue_tail(sch);
        if (skb) {
-               unsigned int len = skb->len;
+               unsigned int len = qdisc_pkt_len(skb);
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);
 
 
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
                return 0;
        }
-       len = skb->len;
+       len = qdisc_pkt_len(skb);
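+       /* qdisc_peek_len() peeks by dequeueing the head packet and
+        * immediately requeueing it, so a failed requeue is a bug.
+        */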
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: failed to requeue\n");
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct hfsc_class *cl;
-       unsigned int len;
        int err;
 
        cl = hfsc_classify(skb, sch, &err);
                return err;
        }
 
-       len = skb->len;
        err = qdisc_enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                cl->qstats.drops++;
        }
 
        if (cl->qdisc->q.qlen == 1)
-               set_active(cl, len);
+               set_active(cl, qdisc_pkt_len(skb));
 
        cl->bstats.packets++;
-       cl->bstats.bytes += len;
+       cl->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
-       sch->bstats.bytes += len;
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
                return NULL;
        }
 
-       update_vf(cl, skb->len, cur_time);
+       update_vf(cl, qdisc_pkt_len(skb), cur_time);
        if (realtime)
-               cl->cl_cumul += skb->len;
+               cl->cl_cumul += qdisc_pkt_len(skb);
 
        if (cl->qdisc->q.qlen != 0) {
                if (cl->cl_flags & HFSC_RSC) {
 
        } else {
                cl->bstats.packets +=
                        skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-               cl->bstats.bytes += skb->len;
+               cl->bstats.bytes += qdisc_pkt_len(skb);
                htb_activate(q, cl);
        }
 
        sch->q.qlen++;
        sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-       sch->bstats.bytes += skb->len;
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        return NET_XMIT_SUCCESS;
 }
 
 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                             int level, struct sk_buff *skb)
 {
-       int bytes = skb->len;
+       int bytes = qdisc_pkt_len(skb);
        long toks, diff;
        enum htb_cmode old_mode;
 
        } while (cl != start);
 
        if (likely(skb != NULL)) {
-               if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
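+               /* DRR: charge the packet against the class's byte deficit;
+                * once it goes negative, top up by one quantum and advance
+                * to the next class at this priority.
+                */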
+               cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
+               if (cl->un.leaf.deficit[level] < 0) {
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
                                          ptr[0]) + prio);
 
        result = tc_classify(skb, p->filter_list, &res);
 
        sch->bstats.packets++;
-       sch->bstats.bytes += skb->len;
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        switch (result) {
        case TC_ACT_SHOT:
                result = TC_ACT_SHOT;
 
 
        if (likely(ret == NET_XMIT_SUCCESS)) {
                sch->q.qlen++;
-               sch->bstats.bytes += skb->len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
        } else
                sch->qstats.drops++;
 
                __skb_queue_after(list, skb, nskb);
 
-               sch->qstats.backlog += nskb->len;
-               sch->bstats.bytes += nskb->len;
+               sch->qstats.backlog += qdisc_pkt_len(nskb);
+               sch->bstats.bytes += qdisc_pkt_len(nskb);
                sch->bstats.packets++;
 
                return NET_XMIT_SUCCESS;
 
 
        ret = qdisc_enqueue(skb, qdisc);
        if (ret == NET_XMIT_SUCCESS) {
-               sch->bstats.bytes += skb->len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
 
 
        ret = qdisc_enqueue(skb, child);
        if (likely(ret == NET_XMIT_SUCCESS)) {
-               sch->bstats.bytes += skb->len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
                sch->q.qlen++;
        } else {
 
        if (d > 1) {
                sfq_index x = q->dep[d + SFQ_DEPTH].next;
                skb = q->qs[x].prev;
-               len = skb->len;
+               len = qdisc_pkt_len(skb);
                __skb_unlink(skb, &q->qs[x]);
                kfree_skb(skb);
                sfq_dec(q, x);
                q->next[q->tail] = q->next[d];
                q->allot[q->next[d]] += q->quantum;
                skb = q->qs[d].prev;
-               len = skb->len;
+               len = qdisc_pkt_len(skb);
                __skb_unlink(skb, &q->qs[d]);
                kfree_skb(skb);
                sfq_dec(q, d);
        if (q->qs[x].qlen >= q->limit)
                return qdisc_drop(skb, sch);
 
-       sch->qstats.backlog += skb->len;
+       sch->qstats.backlog += qdisc_pkt_len(skb);
        __skb_queue_tail(&q->qs[x], skb);
        sfq_inc(q, x);
        if (q->qs[x].qlen == 1) {               /* The flow is new */
                }
        }
        if (++sch->q.qlen <= q->limit) {
-               sch->bstats.bytes += skb->len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
                return 0;
        }
                q->hash[x] = hash;
        }
 
-       sch->qstats.backlog += skb->len;
+       sch->qstats.backlog += qdisc_pkt_len(skb);
        __skb_queue_head(&q->qs[x], skb);
        /* If selected queue has length q->limit+1, this means that
         * all another queues are empty and we do simple tail drop.
                skb = q->qs[x].prev;
                __skb_unlink(skb, &q->qs[x]);
                sch->qstats.drops++;
-               sch->qstats.backlog -= skb->len;
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
                kfree_skb(skb);
                return NET_XMIT_CN;
        }
        skb = __skb_dequeue(&q->qs[a]);
        sfq_dec(q, a);
        sch->q.qlen--;
-       sch->qstats.backlog -= skb->len;
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
 
        /* Is the slot empty? */
        if (q->qs[a].qlen == 0) {
                }
                q->next[q->tail] = a;
                q->allot[a] += q->quantum;
-       } else if ((q->allot[a] -= skb->len) <= 0) {
+       } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
                q->tail = a;
                a = q->next[a];
                q->allot[a] += q->quantum;
 
        struct tbf_sched_data *q = qdisc_priv(sch);
        int ret;
 
-       if (skb->len > q->max_size) {
+       if (qdisc_pkt_len(skb) > q->max_size) {
                sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_ACT
                if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
        }
 
        sch->q.qlen++;
-       sch->bstats.bytes += skb->len;
+       sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
        return 0;
 }
                psched_time_t now;
                long toks;
                long ptoks = 0;
-               unsigned int len = skb->len;
+               unsigned int len = qdisc_pkt_len(skb);
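+               /* Bytes to charge against the byte and peak token buckets. */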
 
                now = psched_get_time();
                toks = psched_tdiff_bounded(now, q->t_c, q->buffer);
 
 
        if (q->q.qlen < dev->tx_queue_len) {
                __skb_queue_tail(&q->q, skb);
-               sch->bstats.bytes += skb->len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
                return 0;
        }
        struct Qdisc *start, *q;
        int busy;
        int nores;
-       int len = skb->len;
        int subq = skb_get_queue_mapping(skb);
        struct sk_buff *skb_res = NULL;
 
                                        master->slaves = NEXT_SLAVE(q);
                                        netif_wake_queue(dev);
                                        master->stats.tx_packets++;
-                                       master->stats.tx_bytes += len;
+                                       master->stats.tx_bytes +=
+                                               qdisc_pkt_len(skb);
                                        return 0;
                                }
                                netif_tx_unlock(slave);