--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
                                     struct gnet_stats_basic_cpu __percpu *cpu)
 {
+       u64 t_bytes = 0, t_packets = 0;
        int i;
 
        for_each_possible_cpu(i) {
                struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
                unsigned int start;
                u64 bytes, packets;
 
                do {
                        start = u64_stats_fetch_begin_irq(&bcpu->syncp);
                        bytes = bcpu->bstats.bytes;
                        packets = bcpu->bstats.packets;
                } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
 
-               bstats->bytes += bytes;
-               bstats->packets += packets;
+               t_bytes += bytes;
+               t_packets += packets;
        }
+       _bstats_update(bstats, t_bytes, t_packets);
 }
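
The do/while above is the standard u64_stats reader pattern: sample a
generation count with u64_stats_fetch_begin_irq(), copy the counters, and
retry if u64_stats_fetch_retry_irq() reports that a writer raced with the
reads. On 64-bit kernels the begin/retry pair compiles to nothing and only
the plain loads remain.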
 
void gnet_stats_add_basic(const seqcount_t *running,
                          struct gnet_stats_basic_packed *bstats,
                          struct gnet_stats_basic_cpu __percpu *cpu,
                          struct gnet_stats_basic_packed *b)
 {
        unsigned int seq;
        u64 bytes = 0;
        u64 packets = 0;
 
        if (cpu) {
                gnet_stats_add_basic_cpu(bstats, cpu);
                return;
        }
        do {
                if (running)
                        seq = read_seqcount_begin(running);
                bytes = b->bytes;
                packets = b->packets;
        } while (running && read_seqcount_retry(running, seq));
 
-       bstats->bytes += bytes;
-       bstats->packets += packets;
+       _bstats_update(bstats, bytes, packets);
 }
 EXPORT_SYMBOL(gnet_stats_add_basic);
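
For reference, _bstats_update() is at this point still the plain helper from
include/net/gen_stats.h, so every hunk in this patch is behavior-neutral; the
point is to funnel all writes through one helper before it later grows a
write-side critical section. A sketch of the helper as assumed here (verify
against the tree the patch applies to):

        /* include/net/gen_stats.h, pre-u64_stats protection */
        static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                          __u64 bytes, __u32 packets)
        {
                bstats->bytes += bytes;
                bstats->packets += packets;
        }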
 
 
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
static void cbq_update(struct cbq_sched_data *q)
 
        for (cl = this; cl; cl = cl->share) {
                long avgidle = cl->avgidle;
                long idle;
 
-               cl->bstats.packets++;
-               cl->bstats.bytes += len;
+               _bstats_update(&cl->bstats, len, 1);
 
                /*
                 * (now - last) is total time between packet right edges.
 
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
static int gred_offload_dump_stats(struct Qdisc *sch)
 {
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt_offload *hw_stats;
+       u64 bytes = 0, packets = 0;
        unsigned int i;
        int ret;
 
        for (i = 0; i < MAX_DPs; i++) {
                if (!table->tab[i])
                        continue;
                table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
                table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
                table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
 
-               _bstats_update(&sch->bstats,
-                              hw_stats->stats.bstats[i].bytes,
-                              hw_stats->stats.bstats[i].packets);
+               bytes += hw_stats->stats.bstats[i].bytes;
+               packets += hw_stats->stats.bstats[i].packets;
                sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
                sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
                sch->qstats.drops += hw_stats->stats.qstats[i].drops;
                sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
                sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
        }
+       _bstats_update(&sch->bstats, bytes, packets);
 
        kfree(hw_stats);
        return ret;
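
The gred hunk shows why these conversions sum into locals first: a later patch
in this series is expected to put Qdisc::bstats behind a u64_stats sync point,
after which every _bstats_update() call becomes a write-side critical section.
Batching the counters keeps that to one section per stats dump instead of one
per virtual queue (DP). Assuming the usual u64_stats conversion, the helper
would become roughly:

        static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                          __u64 bytes, __u32 packets)
        {
                u64_stats_update_begin(&bstats->syncp); /* enter write side */
                bstats->bytes += bytes;
                bstats->packets += packets;
                u64_stats_update_end(&bstats->syncp);   /* readers may retry */
        }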
 
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
 static void htb_offload_aggregate_stats(struct htb_sched *q,
                                        struct htb_class *cl)
 {
+       u64 bytes = 0, packets = 0;
        struct htb_class *c;
        unsigned int i;
 
        gnet_stats_basic_packed_init(&cl->bstats);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
                        struct htb_class *p = c;
 
                        while (p && p->level < cl->level)
                                p = p->parent;
 
                        if (p != cl)
                                continue;
 
-                       cl->bstats.bytes += c->bstats_bias.bytes;
-                       cl->bstats.packets += c->bstats_bias.packets;
+                       bytes += c->bstats_bias.bytes;
+                       packets += c->bstats_bias.packets;
                        if (c->level == 0) {
-                               cl->bstats.bytes += c->leaf.q->bstats.bytes;
-                               cl->bstats.packets += c->leaf.q->bstats.packets;
+                               bytes += c->leaf.q->bstats.bytes;
+                               packets += c->leaf.q->bstats.packets;
                        }
                }
        }
+       _bstats_update(&cl->bstats, bytes, packets);
 }
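
htb_offload_aggregate_stats() gets the same treatment: the walk over the class
hash accumulates bstats_bias and leaf-qdisc counters on the stack, and the
total is published into cl->bstats by a single _bstats_update() at the end,
rather than touching the shared counters once per matching class.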
 
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
        if (q->offload) {
                if (!cl->level) {
                        if (cl->leaf.q)
                                cl->bstats = cl->leaf.q->bstats;
                        else
                                gnet_stats_basic_packed_init(&cl->bstats);
-                       cl->bstats.bytes += cl->bstats_bias.bytes;
-                       cl->bstats.packets += cl->bstats_bias.packets;
+                       _bstats_update(&cl->bstats,
+                                      cl->bstats_bias.bytes,
+                                      cl->bstats_bias.packets);
                } else {
                        htb_offload_aggregate_stats(q, cl);
                }
 
static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
                                     bool last_child, bool destroying,
                                     struct netlink_ext_ack *extack)
 
        WARN_ON(old != q);
 
        if (cl->parent) {
-               cl->parent->bstats_bias.bytes += q->bstats.bytes;
-               cl->parent->bstats_bias.packets += q->bstats.packets;
+               _bstats_update(&cl->parent->bstats_bias,
+                              q->bstats.bytes,
+                              q->bstats.packets);
        }
 
        offload_opt = (struct tc_htb_qopt_offload) {
 
static int htb_change_class(struct Qdisc *sch, u32 classid,
                            u32 parentid, struct nlattr **tca,
                            unsigned long *arg,
                            struct netlink_ext_ack *extack)
 
                                htb_graft_helper(dev_queue, old_q);
                                goto err_kill_estimator;
                        }
-                       parent->bstats_bias.bytes += old_q->bstats.bytes;
-                       parent->bstats_bias.packets += old_q->bstats.packets;
+                       _bstats_update(&parent->bstats_bias,
+                                      old_q->bstats.bytes,
+                                      old_q->bstats.packets);
                        qdisc_put(old_q);
                }
                new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
 
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 
                return err;
        }
 
-       cl->bstats.bytes += len;
-       cl->bstats.packets += gso_segs;
+       _bstats_update(&cl->bstats, len, gso_segs);
        sch->qstats.backlog += len;
        ++sch->q.qlen;
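
The qfq_enqueue() hunk is typical of the datapath callers this patch converts:
bytes come from qdisc_pkt_len() and the packet count is the GSO segment count,
so one skb may account for several packets. A condensed sketch of that calling
pattern, where class_account_skb() is a hypothetical name used only for
illustration, not a helper this patch adds:

        #include <net/sch_generic.h>

        /* Hypothetical wrapper showing the enqueue-side accounting pattern:
         * one _bstats_update() call instead of two raw counter writes.
         */
        static void class_account_skb(struct gnet_stats_basic_packed *bstats,
                                      const struct sk_buff *skb)
        {
                unsigned int len = qdisc_pkt_len(skb);
                unsigned int gso_segs = skb_is_gso(skb) ?
                                        skb_shinfo(skb)->gso_segs : 1;

                _bstats_update(bstats, len, gso_segs);
        }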