struct rcu_head rcu_head;
 };
 
+struct cgroup_base_stat {
+       struct task_cputime cputime;
+};
+
 /*
  * rstat - cgroup scalable recursive statistics.  Accounting is done
  * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
  * aren't active and stat may be read frequently.  The combination can
  * become very expensive.  By propagating selectively, increasing reading
  * frequency decreases the cost of each read.
+ *
+ * This struct hosts both the fields which implement the above -
+ * updated_children and updated_next - and the fields which track basic
+ * resource statistics on top of it - bsync, bstat and last_bstat.
  */
 struct cgroup_rstat_cpu {
        /*
-        * ->sync protects all the current counters.  These are the only
-        * fields which get updated in the hot path.
+        * ->bsync protects ->bstat.  These are the only fields which get
+        * updated in the hot path.
         */
-       struct u64_stats_sync sync;
-       struct task_cputime cputime;
+       struct u64_stats_sync bsync;
+       struct cgroup_base_stat bstat;
 
        /*
         * Snapshots at the last reading.  These are used to calculate the
         * deltas to propagate to the global counters.
         */
-       struct task_cputime last_cputime;
+       struct cgroup_base_stat last_bstat;
 
        /*
         * Child cgroups with stat updates on this cpu since the last read
        struct cgroup *updated_next;            /* NULL iff not on the list */
 };
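
For reference, a condensed sketch of the ->bsync/->bstat protocol described in the comments above (illustrative only, not part of the patch; the real writer and reader are cgroup_base_stat_cputime_account_begin/end() and cgroup_base_stat_flush() further down). The hot-path writer brackets its updates with u64_stats_update_begin/end() while the flush path snapshots the counters with the fetch/retry loop; on 64-bit kernels u64_stats_sync compiles away, on 32-bit it is a seqcount.

/* illustrative only -- mirrors the accounting and flush paths below */
static void example_charge(struct cgroup_rstat_cpu *rstatc, u64 delta)
{
	u64_stats_update_begin(&rstatc->bsync);		/* hot-path writer */
	rstatc->bstat.cputime.sum_exec_runtime += delta;
	u64_stats_update_end(&rstatc->bsync);
}

static struct cgroup_base_stat example_snapshot(struct cgroup_rstat_cpu *rstatc)
{
	struct cgroup_base_stat snap;
	unsigned int seq;

	do {						/* flush-side reader */
		seq = __u64_stats_fetch_begin(&rstatc->bsync);
		snap = rstatc->bstat;
	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

	return snap;
}
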
 
-struct cgroup_stat {
-       /* per-cpu statistics are collected into the following global counters */
-       struct task_cputime cputime;
-       struct prev_cputime prev_cputime;
-};
-
 struct cgroup {
        /* self css with NULL ->ss, points back to this cgroup */
        struct cgroup_subsys_state self;
        struct cgroup_rstat_cpu __percpu *rstat_cpu;
 
        /* cgroup basic resource statistics */
-       struct cgroup_stat pending_stat;        /* pending from children */
-       struct cgroup_stat stat;
+       struct cgroup_base_stat pending_bstat;  /* pending from children */
+       struct cgroup_base_stat bstat;
+       struct prev_cputime prev_cputime;       /* for printing out cputime */
 
        /*
         * list of pidlists, up to two for each namespace (one for procs, one
 
        return pos;
 }
 
-static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
-                                  struct cgroup_stat *src_stat)
+static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
+                                       struct cgroup_base_stat *src_bstat)
 {
-       dst_stat->cputime.utime += src_stat->cputime.utime;
-       dst_stat->cputime.stime += src_stat->cputime.stime;
-       dst_stat->cputime.sum_exec_runtime += src_stat->cputime.sum_exec_runtime;
+       dst_bstat->cputime.utime += src_bstat->cputime.utime;
+       dst_bstat->cputime.stime += src_bstat->cputime.stime;
+       dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
 }
 
-static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
+static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 {
        struct cgroup *parent = cgroup_parent(cgrp);
        struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
-       struct task_cputime *last_cputime = &rstatc->last_cputime;
+       struct task_cputime *last_cputime = &rstatc->last_bstat.cputime;
        struct task_cputime cputime;
-       struct cgroup_stat delta;
+       struct cgroup_base_stat delta;
        unsigned seq;
 
        lockdep_assert_held(&cgroup_rstat_mutex);
 
        /* fetch the current per-cpu values */
        do {
-               seq = __u64_stats_fetch_begin(&rstatc->sync);
-               cputime = rstatc->cputime;
-       } while (__u64_stats_fetch_retry(&rstatc->sync, seq));
+               seq = __u64_stats_fetch_begin(&rstatc->bsync);
+               cputime = rstatc->bstat.cputime;
+       } while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
 
        /* accumulate the deltas to propagate */
        delta.cputime.utime = cputime.utime - last_cputime->utime;
        *last_cputime = cputime;
 
        /* transfer the pending stat into delta */
-       cgroup_stat_accumulate(&delta, &cgrp->pending_stat);
-       memset(&cgrp->pending_stat, 0, sizeof(cgrp->pending_stat));
+       cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);
+       memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));
 
        /* propagate delta into the global stat and the parent's pending */
-       cgroup_stat_accumulate(&cgrp->stat, &delta);
+       cgroup_base_stat_accumulate(&cgrp->bstat, &delta);
        if (parent)
-               cgroup_stat_accumulate(&parent->pending_stat, &delta);
+               cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
 }
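
To make the delta bookkeeping above concrete, here is a worked trace with made-up numbers for a single cpu, child c under parent p:

/*
 * Worked example (hypothetical numbers), one cpu, child c under parent p:
 *
 *   flush of c: bstat.cputime.utime on this cpu = 70, last_bstat = 50
 *     -> delta.utime = 20, last_bstat becomes 70
 *     -> c->bstat.utime += 20 and p->pending_bstat.utime += 20
 *
 *   flush of p (same cpu): the 20 queued in p->pending_bstat is folded
 *     into p's delta, added to p->bstat (and to p's parent's
 *     pending_bstat, if any), then pending_bstat is cleared.
 *
 * A second flush with no new activity produces a delta of 0, so nothing
 * is ever counted twice.
 */
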
 
 /* see cgroup_rstat_flush() */
 
                raw_spin_lock_irq(cpu_lock);
                while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
-                       cgroup_cpu_stat_flush_one(pos, cpu);
+                       cgroup_base_stat_flush(pos, cpu);
                raw_spin_unlock_irq(cpu_lock);
        }
 }
 }
 
 static struct cgroup_rstat_cpu *
-cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
 {
        struct cgroup_rstat_cpu *rstatc;
 
        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
-       u64_stats_update_begin(&rstatc->sync);
+       u64_stats_update_begin(&rstatc->bsync);
        return rstatc;
 }
 
-static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
-                                       struct cgroup_rstat_cpu *rstatc)
+static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
+                                                struct cgroup_rstat_cpu *rstatc)
 {
-       u64_stats_update_end(&rstatc->sync);
+       u64_stats_update_end(&rstatc->bsync);
        cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
 }
 {
        struct cgroup_rstat_cpu *rstatc;
 
-       rstatc = cgroup_cpu_stat_account_begin(cgrp);
-       rstatc->cputime.sum_exec_runtime += delta_exec;
-       cgroup_cpu_stat_account_end(cgrp, rstatc);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
 }
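
For context (not part of this hunk): __cgroup_account_cputime() is the per-cgroup charging primitive, and a per-task wrapper along the following lines, resolving the task's cgroup on the default hierarchy under RCU, is what the scheduler's update path would call. The helper name below is made up; treat it as a sketch.

/* simplified caller-side sketch; the helper name is hypothetical */
static inline void example_account_cputime(struct task_struct *task,
					   u64 delta_exec)
{
	struct cgroup *cgrp;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);	/* task's cgroup on the default hierarchy */
	if (cgroup_parent(cgrp))	/* root is accounted via system stats */
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}
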
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
 {
        struct cgroup_rstat_cpu *rstatc;
 
-       rstatc = cgroup_cpu_stat_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
 
        switch (index) {
        case CPUTIME_USER:
        case CPUTIME_NICE:
-               rstatc->cputime.utime += delta_exec;
+               rstatc->bstat.cputime.utime += delta_exec;
                break;
        case CPUTIME_SYSTEM:
        case CPUTIME_IRQ:
        case CPUTIME_SOFTIRQ:
-               rstatc->cputime.stime += delta_exec;
+               rstatc->bstat.cputime.stime += delta_exec;
                break;
        default:
                break;
        }
 
-       cgroup_cpu_stat_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
 }
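
A minimal caller sketch for the field variant, assuming the (cgrp, index, delta_exec) signature and the CPUTIME_* indices from enum cpu_usage_stat; the nice/user split mirrors how generic cputime accounting picks the index.

/* hypothetical caller, for illustration only */
static void example_account_user(struct cgroup *cgrp, struct task_struct *p,
				 u64 cputime)
{
	enum cpu_usage_stat index =
		(task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	__cgroup_account_cputime_field(cgrp, index, cputime);
}
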
 
-void cgroup_stat_show_cputime(struct seq_file *seq)
+void cgroup_base_stat_cputime_show(struct seq_file *seq)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        u64 usage, utime, stime;
 
        cgroup_rstat_flush_locked(cgrp);
 
-       usage = cgrp->stat.cputime.sum_exec_runtime;
-       cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
-                      &utime, &stime);
+       usage = cgrp->bstat.cputime.sum_exec_runtime;
+       cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
 
        mutex_unlock(&cgroup_rstat_mutex);
 
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 
                rstatc->updated_children = cgrp;
-               u64_stats_init(&rstatc->sync);
+               u64_stats_init(&rstatc->bsync);
        }
 
-       prev_cputime_init(&cgrp->stat.prev_cputime);
-
        return 0;
 }
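
Finally, a usage sketch for the renamed show helper; the file name and cftype wiring below are assumptions for illustration, not part of this patch.

/* hypothetical cgroup2 interface file wiring, for illustration only */
static int example_stat_show(struct seq_file *seq, void *v)
{
	cgroup_base_stat_cputime_show(seq);
	return 0;
}

static struct cftype example_stat_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = example_stat_show,
	},
	{ }	/* terminate */
};
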