 void refresh_cpu_vm_stats(int);
 void refresh_zone_stat_thresholds(void);
 
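+/* Fold a pageset's vm_stat_diff[] into the zone and global counters. */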
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+
 int calculate_pressure_threshold(struct zone *zone);
 int calculate_normal_threshold(struct zone *zone);
 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
 
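+/* !SMP: stat updates go straight to the counters, nothing to drain. */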
+static inline void drain_zonestat(struct zone *zone,
+                       struct per_cpu_pageset *pset) { }
 #endif         /* CONFIG_SMP */
 
 static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
 
                local_irq_save(flags);
                if (pcp->count > 0)
                        free_pcppages_bulk(zone, pcp->count, pcp);
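+               /* fold stat deltas before setup_pageset() clears them */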
+               drain_zonestat(zone, pset);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }
 void zone_pcp_reset(struct zone *zone)
 {
        unsigned long flags;
+       int cpu;
+       struct per_cpu_pageset *pset;
 
        /* avoid races with drain_pages()  */
        local_irq_save(flags);
        if (zone->pageset != &boot_pageset) {
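+               /* drain each cpu's stat deltas before the pagesets are freed */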
+               for_each_online_cpu(cpu) {
+                       pset = per_cpu_ptr(zone->pageset, cpu);
+                       drain_zonestat(zone, pset);
+               }
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }
 
                        atomic_long_add(global_diff[i], &vm_stat[i]);
 }
 
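+/*
+ * Fold a cpu's outstanding per-zone stat deltas into the zone and
+ * global counters, and clear the deltas.
+ */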
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
+{
+       int i;
+
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+               if (pset->vm_stat_diff[i]) {
+                       int v = pset->vm_stat_diff[i];
+                       pset->vm_stat_diff[i] = 0;
+                       atomic_long_add(v, &zone->vm_stat[i]);
+                       atomic_long_add(v, &vm_stat[i]);
+               }
+}
 #endif
 
 #ifdef CONFIG_NUMA