};
 
 struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
 struct memcg_vmstats;
 struct lruvec_stats_percpu;
 struct lruvec_stats;
        struct list_head objcg_list;
 
        struct memcg_vmstats_percpu __percpu *vmstats_percpu;
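+       /* cgroup v1 only: percpu state for threshold & softlimit events */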
+       struct memcg1_events_percpu __percpu *events_percpu;
 
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
 
        MEM_CGROUP_NTARGETS,
 };
 
+/* Cgroup1: threshold notifications & softlimit tree updates */
+struct memcg1_events_percpu {
+       /* running count of pages charged or uncharged on this cpu */
+       unsigned long nr_page_events;
+       /* nr_page_events value at which each target next fires */
+       unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
 bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                enum mem_cgroup_events_target target);
 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
 int memory_stat_show(struct seq_file *m, void *v);
 
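+/*
+ * Allocate the cgroup v1 event state. GFP_KERNEL_ACCOUNT charges the
+ * percpu allocation to the allocating cgroup.
+ */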
+static inline bool memcg1_alloc_events(struct mem_cgroup *memcg)
+{
+       memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
+                                               GFP_KERNEL_ACCOUNT);
+       return !!memcg->events_percpu;
+}
+
+static inline void memcg1_free_events(struct mem_cgroup *memcg)
+{
+       if (memcg->events_percpu)
+               free_percpu(memcg->events_percpu);
+}
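+
+/*
+ * These helpers are paired in the memcg allocation and teardown paths:
+ * the event state is allocated right after vmstats_percpu and released
+ * together with it when the memcg is freed.
+ */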
+
 /* Cgroup v1-specific declarations */
 #ifdef CONFIG_MEMCG_V1
 void memcg1_memcg_init(struct mem_cgroup *memcg);
 
        /* Delta calculation for lockless upward propagation */
        long                    state_prev[MEMCG_VMSTAT_SIZE];
        unsigned long           events_prev[NR_MEMCG_EVENTS];
-
-       /* Cgroup1: threshold notifications & softlimit tree updates */
-       unsigned long           nr_page_events;
-       unsigned long           targets[MEM_CGROUP_NTARGETS];
 } ____cacheline_aligned;
 
 struct memcg_vmstats {
                nr_pages = -nr_pages; /* for event */
        }
 
-       __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
+       __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
 }
 
 bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 {
        unsigned long val, next;
 
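+       /* val: running page-event count; next: count at which @target fires */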
-       val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
-       next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
+       val = __this_cpu_read(memcg->events_percpu->nr_page_events);
+       next = __this_cpu_read(memcg->events_percpu->targets[target]);
        /* from time_after() in jiffies.h */
        if ((long)(next - val) < 0) {
                switch (target) {
                default:
                        break;
                }
-               __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
+               __this_cpu_write(memcg->events_percpu->targets[target], next);
                return true;
        }
        return false;
 
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
+       memcg1_free_events(memcg);
        kfree(memcg->vmstats);
        free_percpu(memcg->vmstats_percpu);
        kfree(memcg);
        if (!memcg->vmstats_percpu)
                goto fail;
 
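+       /* memcg1_free_events() handles a NULL events_percpu if this fails */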
+       if (!memcg1_alloc_events(memcg))
+               goto fail;
+
        for_each_possible_cpu(cpu) {
                if (parent)
                        pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
 
        local_irq_save(flags);
        __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
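+       /* the page-event count feeds the v1 threshold/softlimit checks below */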
-       __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
+       __this_cpu_add(ug->memcg->events_percpu->nr_page_events, ug->nr_memory);
        memcg1_check_events(ug->memcg, ug->nid);
        local_irq_restore(flags);