 	}
 }
 
-static void do_flush_stats(struct mem_cgroup *memcg)
+static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
 {
+	if (!force && !memcg_vmstats_needs_flush(memcg->vmstats))
+		return;
+
 	if (mem_cgroup_is_root(memcg))
 		WRITE_ONCE(flush_last_time, jiffies_64);
@@ ... @@
 	if (!memcg)
 		memcg = root_mem_cgroup;
 
-	if (memcg_vmstats_needs_flush(memcg->vmstats))
-		do_flush_stats(memcg);
+	__mem_cgroup_flush_stats(memcg, false);
 }
 
 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
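Taken together, the two hunks above move the "do we even need to flush?" decision into the helper itself: a caller either accepts the update threshold (force == false) and may see slightly stale numbers, or insists on an up-to-date view (force == true). The following is a rough userspace sketch of that pattern, using hypothetical names and plain C11 atomics in place of the kernel's per-CPU bookkeeping; it is an illustration, not the memcg implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the pending-update counter and its flush threshold */
static atomic_long pending_updates;
#define FLUSH_THRESHOLD 64

static void expensive_flush(void)
{
	/* plays the role of cgroup_rstat_flush(): aggregate all pending deltas */
	atomic_store(&pending_updates, 0);
	puts("flushed");
}

/* one helper, two behaviours: thresholded by default, unconditional on force */
static void flush_stats(bool force)
{
	if (!force && atomic_load(&pending_updates) < FLUSH_THRESHOLD)
		return;		/* cheap path: caller tolerates stale data */
	expensive_flush();
}

int main(void)
{
	atomic_fetch_add(&pending_updates, 10);
	flush_stats(false);	/* skipped: only 10 updates, below threshold */
	flush_stats(true);	/* forced: accuracy matters to this caller */
	return 0;
}

Keeping the skip logic inside the helper means no caller can forget it, and the force flag documents the few sites where accuracy is worth the full flush.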
@@ ... @@
 	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 	 * in latency-sensitive paths is as cheap as possible.
 	 */
-	do_flush_stats(root_mem_cgroup);
+	__mem_cgroup_flush_stats(root_mem_cgroup, true);
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
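This hunk is in the periodic flusher (flush_memcg_stats_dwork in memcontrol.c, which re-arms itself through queue_delayed_work()). It always forces the flush, and since it flushes from the root it is also the path that updates flush_last_time above; per the comment, that is what keeps latency-sensitive callers such as mem_cgroup_flush_stats_ratelimited() cheap, since they can skip flushing while the worker is keeping up. Below is a continuation of the userspace sketch, again with hypothetical names and simplified timekeeping:

#include <stdbool.h>
#include <time.h>

#define FLUSH_PERIOD_SEC 2		/* stand-in for FLUSH_TIME */

static time_t last_flush;		/* stand-in for flush_last_time */

void flush_stats(bool force);		/* from the sketch above */

/* periodic worker: always flush, then remember when we last did */
void periodic_flush(void)
{
	flush_stats(true);
	last_flush = time(NULL);
	/* the kernel worker re-queues itself here via queue_delayed_work() */
}

/* ratelimited caller: flush only if the periodic worker looks overdue */
void flush_stats_ratelimited(void)
{
	if (time(NULL) > last_flush + 2 * FLUSH_PERIOD_SEC)
		flush_stats(false);
}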
@@ ... @@
 			break;
 		}
 
-		/*
-		 * mem_cgroup_flush_stats() ignores small changes. Use
-		 * do_flush_stats() directly to get accurate stats for charging.
-		 */
-		do_flush_stats(memcg);
+		/* Force flush to get accurate stats for charging */
+		__mem_cgroup_flush_stats(memcg, true);
 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
 		if (pages < max)
 			continue;
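The zswap charging hunk is the case that motivates the force flag: the value read immediately after the flush is compared against a hard limit (the cgroup's zswap.max), so tolerating the usual staleness could let a cgroup overshoot its limit. In the sketch's terms, an enforcement path forces a refresh while a monitoring path does not; read_stat() below is a hypothetical stand-in for memcg_page_state().

#include <stdbool.h>

void flush_stats(bool force);	/* from the sketch above */
long read_stat(void);		/* hypothetical aggregated-counter read */

/* enforcement path: correctness depends on an up-to-date aggregate */
bool may_charge(long limit)
{
	flush_stats(true);	/* force: a stale read could overshoot the limit */
	return read_stat() < limit;
}

/* monitoring path: slightly stale is fine, skip the expensive flush */
long current_usage(void)
{
	flush_stats(false);
	return read_stat();
}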