mutex_unlock(&percpu_charge_mutex);
 }
 
-static int memcg_hotplug_cpu_dead(unsigned int cpu)
+static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu)
 {
-       struct memcg_stock_pcp *stock;
-       struct mem_cgroup *memcg;
-
-       stock = &per_cpu(memcg_stock, cpu);
-       drain_stock(stock);
+       int nid;
 
-       for_each_mem_cgroup(memcg) {
+       for_each_node(nid) {
+               struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
+               unsigned long stat[NR_VM_NODE_STAT_ITEMS];
+               struct batched_lruvec_stat *lstatc;
                int i;
 
+               lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
                for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-                       int nid;
+                       stat[i] = lstatc->count[i];
+                       lstatc->count[i] = 0;
+               }
 
-                       for_each_node(nid) {
-                               struct batched_lruvec_stat *lstatc;
-                               struct mem_cgroup_per_node *pn;
-                               long x;
+               do {
+                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+                               atomic_long_add(stat[i], &pn->lruvec_stat[i]);
+               } while ((pn = parent_nodeinfo(pn, nid)));
+       }
+}
 
-                               pn = memcg->nodeinfo[nid];
-                               lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
+static int memcg_hotplug_cpu_dead(unsigned int cpu)
+{
+       struct memcg_stock_pcp *stock;
+       struct mem_cgroup *memcg;
 
-                               x = lstatc->count[i];
-                               lstatc->count[i] = 0;
+       stock = &per_cpu(memcg_stock, cpu);
+       drain_stock(stock);
 
-                               if (x) {
-                                       do {
-                                               atomic_long_add(x, &pn->lruvec_stat[i]);
-                                       } while ((pn = parent_nodeinfo(pn, nid)));
-                               }
-                       }
-               }
-       }
+       for_each_mem_cgroup(memcg)
+               memcg_flush_lruvec_page_state(memcg, cpu);
 
        return 0;
 }
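
The new memcg_flush_lruvec_page_state() drains one CPU's batched per-node counters and folds the snapshot into the atomic counters of that node and every ancestor, walking upward via parent_nodeinfo(). For orientation, a sketch of that helper as it looks in mm/memcontrol.c around this change (reproduced from memory, so verify against the tree rather than treating it as authoritative):

    static struct mem_cgroup_per_node *
    parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
    {
            /*
             * Step to the same node's info in the parent memcg, or
             * return NULL once the root of the hierarchy is reached,
             * which terminates the do/while walk above.
             */
            struct mem_cgroup *parent = parent_mem_cgroup(pn->memcg);

            if (!parent)
                    return NULL;
            return parent->nodeinfo[nid];
    }

The next hunk, whose leading context lines are the two closing braces of the function that sits just above the old definition, removes the previous variant of memcg_flush_lruvec_page_state(), which looped over online CPUs internally.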
        }
 }
 
-static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg)
-{
-       int node;
-
-       for_each_node(node) {
-               struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
-               unsigned long stat[NR_VM_NODE_STAT_ITEMS] = { 0 };
-               struct mem_cgroup_per_node *pi;
-               int cpu, i;
-
-               for_each_online_cpu(cpu)
-                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-                               stat[i] += per_cpu(
-                                       pn->lruvec_stat_cpu->count[i], cpu);
-
-               for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-                               atomic_long_add(stat[i], &pi->lruvec_stat[i]);
-       }
-}
-
 #ifdef CONFIG_MEMCG_KMEM
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
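
With the old variant gone, the per-CPU loop moves to the callers. Expressed with the new helper, the removed all-CPU flush is equivalent to roughly the following (flush_all_lruvec_stats is a hypothetical name, used here only to illustrate the new calling convention):

    /*
     * Illustration only: the removed all-CPU flush, rewritten in terms of
     * the new per-CPU memcg_flush_lruvec_page_state().
     */
    static void flush_all_lruvec_stats(struct mem_cgroup *memcg)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    memcg_flush_lruvec_page_state(memcg, cpu);
    }

This is exactly the shape mem_cgroup_free() takes in the hunk below.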
 
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
+       int cpu;
+
        memcg_wb_domain_exit(memcg);
        /*
         * Flush percpu lruvec stats to guarantee the value
         * correctness on parent's and all ancestor levels.
         */
-       memcg_flush_lruvec_page_state(memcg);
+       for_each_online_cpu(cpu)
+               memcg_flush_lruvec_page_state(memcg, cpu);
        __mem_cgroup_free(memcg);
 }
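
Flushing only online CPUs in mem_cgroup_free() is sufficient because counts accumulated on CPUs that have since gone offline are folded in by memcg_hotplug_cpu_dead(). For context, that callback is registered from mem_cgroup_init(); the registration is not part of this patch and is quoted from memory, so double-check it against the tree:

    /* Drain per-CPU charge caches and lruvec stat batches when a CPU dies. */
    cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
                              memcg_hotplug_cpu_dead);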