        bstats_update(&sch->bstats, skb);
 }
 
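+/* Subtract the packet's length from the qdisc's byte backlog counter. */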
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+                                           const struct sk_buff *skb)
+{
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
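+/* Add the packet's length to the qdisc's byte backlog counter. */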
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+                                           const struct sk_buff *skb)
+{
+       sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
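+/* Add @count to the qdisc's drop counter in a single update. */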
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+       sch->qstats.drops += count;
+}
+
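+/* Count a single dropped packet against the qdisc. */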
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+       sch->qstats.drops++;
+}
+
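+/* Count a single overlimit event against the qdisc. */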
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+       sch->qstats.overlimits++;
+}
+
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
 {
        __skb_queue_tail(list, skb);
-       sch->qstats.backlog += qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_inc(sch, skb);
 
        return NET_XMIT_SUCCESS;
 }
        struct sk_buff *skb = __skb_dequeue(list);
 
        if (likely(skb != NULL)) {
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }
 
 
        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);
-               sch->qstats.backlog -= len;
+               qdisc_qstats_backlog_dec(sch, skb);
                kfree_skb(skb);
                return len;
        }
        struct sk_buff *skb = __skb_dequeue_tail(list);
 
        if (likely(skb != NULL))
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
 
        return skb;
 }
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 {
        kfree_skb(skb);
-       sch->qstats.drops++;
+       qdisc_qstats_drop(sch);
 
        return NET_XMIT_DROP;
 }
 
 static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
 {
-       sch->qstats.drops++;
+       qdisc_qstats_drop(sch);
 
 #ifdef CONFIG_NET_CLS_ACT
        if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
-               sch->qstats.drops += drops;
+               __qdisc_qstats_drop(sch, drops);
        }
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
        if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
                if (net_xmit_drop_count(ret)) {
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                        if (flow)
                                flow->qstats.drops++;
                }
 
 #endif
        if (cl == NULL) {
                if (ret & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
        }
 
        if (net_xmit_drop_count(ret)) {
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
                cbq_mark_toplevel(q, cl);
                cl->qstats.drops++;
        }
                        return 0;
                }
                if (net_xmit_drop_count(ret))
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                return 0;
        }
 
-       sch->qstats.drops++;
+       qdisc_qstats_drop(sch);
        return -1;
 }
 #endif
         */
 
        if (sch->q.qlen) {
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                if (q->wd_expires)
                        qdisc_watchdog_schedule(&q->watchdog,
                                                now + q->wd_expires);
 
        if (idx == q->tail)
                choke_zap_tail_holes(q);
 
-       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_dec(sch, skb);
        qdisc_drop(skb, sch);
        qdisc_tree_decrease_qlen(sch, 1);
        --sch->q.qlen;
                if (q->vars.qavg > p->qth_max) {
                        q->vars.qcount = -1;
 
-                       sch->qstats.overlimits++;
+                       qdisc_qstats_overlimit(sch);
                        if (use_harddrop(q) || !use_ecn(q) ||
                            !INET_ECN_set_ce(skb)) {
                                q->stats.forced_drop++;
                                q->vars.qcount = 0;
                                q->vars.qR = red_random(p);
 
-                               sch->qstats.overlimits++;
+                               qdisc_qstats_overlimit(sch);
                                if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
                                        q->stats.prob_drop++;
                                        goto congestion_drop;
                q->tab[q->tail] = skb;
                q->tail = (q->tail + 1) & q->tab_mask;
                ++sch->q.qlen;
-               sch->qstats.backlog += qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_inc(sch, skb);
                return NET_XMIT_SUCCESS;
        }
 
 
 other_drop:
        if (ret & __NET_XMIT_BYPASS)
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        kfree_skb(skb);
        return ret;
 }
        q->tab[q->head] = NULL;
        choke_zap_head_holes(q);
        --sch->q.qlen;
-       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_dec(sch, skb);
        qdisc_bstats_update(sch, skb);
 
        return skb;
                                        ntab[tail++] = skb;
                                        continue;
                                }
-                               sch->qstats.backlog -= qdisc_pkt_len(skb);
+                               qdisc_qstats_backlog_dec(sch, skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
 
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);
 
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
        qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
 
        cl = drr_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return err;
        }
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                }
                return err;
        }
 
        err = qdisc_enqueue(skb, p->q);
        if (err != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(err))
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                return err;
        }
 
 
 
        /* queue full, remove one skb to fulfill the limit */
        __qdisc_queue_drop_head(sch, &sch->q);
-       sch->qstats.drops++;
+       qdisc_qstats_drop(sch);
        qdisc_enqueue_tail(skb, sch);
 
        return NET_XMIT_CN;
 
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
-       sch->qstats.backlog += qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
 
        q->backlogs[idx] -= len;
-       kfree_skb(skb);
        sch->q.qlen--;
-       sch->qstats.drops++;
-       sch->qstats.backlog -= len;
+       qdisc_qstats_drop(sch);
+       qdisc_qstats_backlog_dec(sch, skb);
+       kfree_skb(skb);
        flow->dropped++;
        return idx;
 }
        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
-       sch->qstats.backlog += qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_inc(sch, skb);
 
        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
 
                break;
 
        case RED_PROB_MARK:
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                break;
 
        case RED_HARD_MARK:
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
 
        cl = hfsc_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return err;
        }
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                }
                return err;
        }
                 */
                cl = vttree_get_minvt(&q->root, cur_time);
                if (cl == NULL) {
-                       sch->qstats.overlimits++;
+                       qdisc_qstats_overlimit(sch);
                        hfsc_schedule_watchdog(sch);
                        return NULL;
                }
                                list_move_tail(&cl->dlist, &q->droplist);
                        }
                        cl->qstats.drops++;
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                        sch->q.qlen--;
                        return len;
                }
 
                struct sk_buff *skb = dequeue_head(bucket);
 
                sch->q.qlen--;
-               sch->qstats.drops++;
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_drop(sch);
+               qdisc_qstats_backlog_dec(sch, skb);
                kfree_skb(skb);
        }
 
 
        bucket = &q->buckets[idx];
        bucket_add(bucket, skb);
-       sch->qstats.backlog += qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_inc(sch, skb);
 
        if (list_empty(&bucket->bucketchain)) {
                unsigned int weight;
        if (bucket->head) {
                skb = dequeue_head(bucket);
                sch->q.qlen--;
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
        }
 
        if (!skb) {
 
 #ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
 #endif
        } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret)) {
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                        cl->qstats.drops++;
                }
                return ret;
                                goto ok;
                }
        }
-       sch->qstats.overlimits++;
+       qdisc_qstats_overlimit(sch);
        if (likely(next_event > q->now)) {
                if (!test_bit(__QDISC_STATE_DEACTIVATED,
                              &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
 
        switch (result) {
        case TC_ACT_SHOT:
                result = TC_ACT_SHOT;
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
                break;
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
 
        if (qdisc == NULL) {
 
                if (ret & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
                return NET_XMIT_SUCCESS;
        }
        if (net_xmit_drop_count(ret))
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        return ret;
 }
 
 
        /* Drop packet? */
        if (loss_event(q)) {
                if (q->ecn && INET_ECN_set_ce(skb))
-                       sch->qstats.drops++; /* mark packet */
+                       qdisc_qstats_drop(sch); /* mark packet */
                else
                        --count;
        }
        if (count == 0) {
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        }
        if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
                return qdisc_reshape_fail(skb, sch);
 
-       sch->qstats.backlog += qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_inc(sch, skb);
 
        cb = netem_skb_cb(skb);
        if (q->gap == 0 ||              /* not doing reordering */
                        sch->q.qlen--;
                        skb->next = NULL;
                        skb->prev = NULL;
                        len = qdisc_pkt_len(skb);
-                       sch->qstats.backlog -= len;
+                       qdisc_qstats_backlog_dec(sch, skb);
                        kfree_skb(skb);
                }
        }
        if (!len && q->qdisc && q->qdisc->ops->drop)
            len = q->qdisc->ops->drop(q->qdisc);
        if (len)
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
 
        return len;
 }
        skb = __skb_dequeue(&sch->q);
        if (skb) {
 deliver:
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                qdisc_unthrottled(sch);
                qdisc_bstats_update(sch, skb);
                return skb;
 
                                if (unlikely(err != NET_XMIT_SUCCESS)) {
                                        if (net_xmit_drop_count(err)) {
-                                               sch->qstats.drops++;
+                                               qdisc_qstats_drop(sch);
                                                qdisc_tree_decrease_qlen(sch, 1);
                                        }
                                }
 
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __skb_dequeue(&sch->q);
 
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                qdisc_drop(skb, sch);
        }
        qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
 
        if (qdisc == NULL) {
 
                if (ret & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
                return NET_XMIT_SUCCESS;
        }
        if (net_xmit_drop_count(ret))
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        return ret;
 }
 
 
        cl = qfq_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return err;
        }
                pr_debug("qfq_enqueue: enqueue failed %d\n", err);
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                }
                return err;
        }
 
                break;
 
        case RED_PROB_MARK:
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                break;
 
        case RED_HARD_MARK:
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                if (red_use_harddrop(q) || !red_use_ecn(q) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
                sch->q.qlen++;
        } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        }
        return ret;
 
 
        if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
                q->stats.other++;
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
                sch->q.qlen--;
                return len;
        }
 
        struct flow_keys keys;
 
        if (unlikely(sch->q.qlen >= q->limit)) {
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                q->stats.queuedrop++;
                goto drop;
        }
        sfb_skb_cb(skb)->hashes[slot] = 0;
 
        if (unlikely(minqlen >= q->max)) {
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
                q->stats.bucketdrop++;
                goto drop;
        }
                        }
                }
                if (sfb_rate_limit(skb, q)) {
-                       sch->qstats.overlimits++;
+                       qdisc_qstats_overlimit(sch);
                        q->stats.penaltydrop++;
                        goto drop;
                }
                increment_qlen(skb, q);
        } else if (net_xmit_drop_count(ret)) {
                q->stats.childdrop++;
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        }
        return ret;
 
        return NET_XMIT_CN;
 other_drop:
        if (ret & __NET_XMIT_BYPASS)
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        kfree_skb(skb);
        return ret;
 }
 
                sfq_dec(q, x);
-               kfree_skb(skb);
                sch->q.qlen--;
-               sch->qstats.drops++;
-               sch->qstats.backlog -= len;
+               qdisc_qstats_drop(sch);
+               qdisc_qstats_backlog_dec(sch, skb);
+               kfree_skb(skb);
                return len;
        }
 
        hash = sfq_classify(skb, sch, &ret);
        if (hash == 0) {
                if (ret & __NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
                        break;
 
                case RED_PROB_MARK:
-                       sch->qstats.overlimits++;
+                       qdisc_qstats_overlimit(sch);
                        if (sfq_prob_mark(q)) {
                                /* We know we have at least one packet in queue */
                                if (sfq_headdrop(q) &&
                        goto congestion_drop;
 
                case RED_HARD_MARK:
-                       sch->qstats.overlimits++;
+                       qdisc_qstats_overlimit(sch);
                        if (sfq_hard_mark(q)) {
                                /* We know we have at least one packet in queue */
                                if (sfq_headdrop(q) &&
        }
 
 enqueue:
-       sch->qstats.backlog += qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_inc(sch, skb);
        slot->backlog += qdisc_pkt_len(skb);
        slot_queue_add(slot, skb);
        sfq_inc(q, x);
        sfq_dec(q, a);
        qdisc_bstats_update(sch, skb);
        sch->q.qlen--;
-       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_qstats_backlog_dec(sch, skb);
        slot->backlog -= qdisc_pkt_len(skb);
        /* Is the slot empty? */
        if (slot->qlen == 0) {
                if (x == SFQ_EMPTY_SLOT) {
                        x = q->dep[0].next; /* get a free slot */
                        if (x >= SFQ_MAX_FLOWS) {
-drop:                          sch->qstats.backlog -= qdisc_pkt_len(skb);
+drop:
+                               qdisc_qstats_backlog_dec(sch, skb);
                                kfree_skb(skb);
                                dropped++;
                                continue;
 
                ret = qdisc_enqueue(segs, q->qdisc);
                if (ret != NET_XMIT_SUCCESS) {
                        if (net_xmit_drop_count(ret))
-                               sch->qstats.drops++;
+                               qdisc_qstats_drop(sch);
                } else {
                        nb++;
                }
        ret = qdisc_enqueue(skb, q->qdisc);
        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
-                       sch->qstats.drops++;
+                       qdisc_qstats_drop(sch);
                return ret;
        }
 
 
        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
                sch->q.qlen--;
-               sch->qstats.drops++;
+               qdisc_qstats_drop(sch);
        }
        return len;
 }
                   (cf. CSZ, HPFQ, HFSC)
                 */
 
-               sch->qstats.overlimits++;
+               qdisc_qstats_overlimit(sch);
        }
        return NULL;
 }