        /*
         * Zero the differential counters of the dead processor
         * so that the vm statistics are consistent.
         */
-       cpu_vm_stats_fold(cpu);
+       cpu_vm_stats_fold(cpu, false);

        for_each_populated_zone(zone)
                zone_pcp_update(zone, 0);
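
In the hot-unplug path above, do_pagesets is false, so the fold skips the
pageset-expiry logic added further down. The fold itself is an
xchg-and-add: each per-CPU differential is atomically exchanged with zero
and the value added to the global counter, so no update is lost or
double-counted. A minimal userspace sketch of that pattern (plain C11
atomics; NR_CPUS, NR_ITEMS and fold_cpu_stats() are invented for
illustration, not kernel symbols):

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS  4
#define NR_ITEMS 2

static atomic_long global_stat[NR_ITEMS];            /* zone->vm_stat analogue */
static atomic_int  vm_stat_diff[NR_CPUS][NR_ITEMS];  /* per-CPU differentials  */

/* Model of cpu_vm_stats_fold(cpu, false): fold the diffs, skip pagesets. */
static void fold_cpu_stats(int cpu)
{
        for (int i = 0; i < NR_ITEMS; i++) {
                int v = atomic_exchange(&vm_stat_diff[cpu][i], 0);
                if (v)
                        atomic_fetch_add(&global_stat[i], v);
        }
}

int main(void)
{
        atomic_fetch_add(&vm_stat_diff[1][0], 5);  /* CPU 1 batched 5 pages */
        fold_cpu_stats(1);                         /* CPU 1 goes offline    */
        printf("global[0] = %ld\n", atomic_load(&global_stat[0]));
        return 0;
}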
/*
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
-void cpu_vm_stats_fold(int cpu)
+void cpu_vm_stats_fold(int cpu, bool do_pagesets)
{
        struct pglist_data *pgdat;
        struct zone *zone;
        int i;
        int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

        for_each_populated_zone(zone) {
                struct per_cpu_zonestat *pzstats;
+#ifdef CONFIG_NUMA
+               struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+#endif

                pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
                        if (pzstats->vm_stat_diff[i]) {
                                int v;

                                v = xchg(&pzstats->vm_stat_diff[i], 0);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_zone_diff[i] += v;
+#ifdef CONFIG_NUMA
+                               /* 3 seconds idle till flush */
+                               if (do_pagesets)
+                                       pcp->expire = 3;
+#endif
                        }
                }
#ifdef CONFIG_NUMA
                for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
                        if (pzstats->vm_numa_event[i]) {
                                unsigned long v;

                                v = xchg(&pzstats->vm_numa_event[i], 0);
                                zone_numa_event_add(v, zone, i);
                        }
                }
+
+               if (do_pagesets) {
+                       cond_resched();
+                       /*
+                        * Deal with draining the remote pageset of this
+                        * processor
+                        *
+                        * Check if there are pages remaining in this pageset;
+                        * if not then there is nothing to expire.
+                        */
+                       if (!pcp->expire || !pcp->count)
+                               continue;
+
+                       /*
+                        * We never drain zones local to this processor.
+                        */
+                       if (zone_to_nid(zone) == cpu_to_node(cpu)) {
+                               pcp->expire = 0;
+                               continue;
+                       }
+
+                       WARN_ON(pcp->expire < 0);
+                       /*
+                        * pcp->expire is only accessed from vmstat_shepherd context,
+                        * therefore no locking is required.
+                        */
+                       if (--pcp->expire)
+                               continue;
+
+                       if (pcp->count)
+                               drain_zone_pages(zone, pcp);
+               }
#endif
}
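
The do_pagesets block mirrors the expiry heuristic refresh_cpu_vm_stats()
applies to remote pagesets: a fold that observes counter activity re-arms
a three-pass TTL, each shepherd pass decrements it, zones local to the
CPU are never drained, and a drain happens only once a remote-node
pageset has stayed idle long enough for the TTL to hit zero. A standalone
model of that state machine (struct pageset and shepherd_visit() are
invented names; the kernel works on struct per_cpu_pages and calls
drain_zone_pages()):

#include <stdbool.h>
#include <stdio.h>

struct pageset {
        int count;   /* pages sitting in the per-CPU free list */
        int expire;  /* shepherd passes left before a drain    */
};

/* One shepherd visit; returns true if the pageset should be drained. */
static bool shepherd_visit(struct pageset *pcp, bool zone_is_local,
                           bool saw_activity)
{
        if (saw_activity)
                pcp->expire = 3;        /* "3 seconds idle till flush" */

        if (!pcp->expire || !pcp->count)
                return false;           /* nothing staged or nothing armed */

        if (zone_is_local) {
                pcp->expire = 0;        /* never drain zones local to the CPU */
                return false;
        }

        if (--pcp->expire)
                return false;           /* still counting down */

        return pcp->count != 0;         /* TTL hit zero: drain now */
}

int main(void)
{
        struct pageset pcp = { .count = 32, .expire = 0 };

        shepherd_visit(&pcp, false, true);   /* pass 1: activity re-arms TTL */
        for (int pass = 2; pass <= 4; pass++)
                if (shepherd_visit(&pcp, false, false))
                        printf("drain on pass %d (count=%d)\n", pass, pcp.count);
        return 0;
}

In this run the TTL is armed on pass 1 and the drain fires on pass 3,
matching the intent of the "3 seconds idle till flush" comment given a
shepherd that runs roughly once per stat interval.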
        cpus_read_lock();
        for_each_online_cpu(cpu) {
-               cpu_vm_stats_fold(cpu);
+               cpu_vm_stats_fold(cpu, true);
                cond_resched();
        }
        cpus_read_unlock();
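
What makes folding from the shepherd safe is that both ends use atomic
read-modify-write: the fold side xchg()s the differential while the
counter's owner may still be updating it, so no work item has to run on
the remote CPU. A self-contained two-thread model of that interaction
(C11 atomics plus POSIX threads; worker(), shepherd() and the counter
names are illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int  cpu_diff;     /* one CPU's vm_stat_diff slot */
static atomic_long global_stat;  /* the zone-wide counter       */

static void *worker(void *arg)
{
        (void)arg;
        for (int n = 0; n < 100000; n++)
                atomic_fetch_add(&cpu_diff, 1);     /* "local" batched update */
        return NULL;
}

static void *shepherd(void *arg)
{
        (void)arg;
        for (int pass = 0; pass < 1000; pass++) {
                int v = atomic_exchange(&cpu_diff, 0);  /* remote fold */
                if (v)
                        atomic_fetch_add(&global_stat, v);
        }
        return NULL;
}

int main(void)
{
        pthread_t w, s;

        pthread_create(&w, NULL, worker, NULL);
        pthread_create(&s, NULL, shepherd, NULL);
        pthread_join(w, NULL);
        pthread_join(s, NULL);

        /* final fold picks up whatever the shepherd's last pass missed */
        atomic_fetch_add(&global_stat, atomic_exchange(&cpu_diff, 0));
        printf("global = %ld (expected 100000)\n", atomic_load(&global_stat));
        return 0;
}

Every increment lands in exactly one exchange, so the total is exact no
matter how the threads interleave; this mirrors the kernel side, where
folding a live CPU's counters remotely is only safe because the per-CPU
update path itself uses atomic read-modify-write operations.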