if (mem_cgroup_is_root(memcg)) {
                /*
-                * We can reach here from irq context through:
-                * uncharge_batch()
-                * |--memcg_check_events()
-                *    |--mem_cgroup_threshold()
-                *       |--__mem_cgroup_threshold()
-                *          |--mem_cgroup_usage
-                *
-                * rstat flushing is an expensive operation that should not be
-                * done from irq context; use stale stats in this case.
-                * Arguably, usage threshold events are not reliable on the root
-                * memcg anyway since its usage is ill-defined.
-                *
-                * Additionally, other call paths through memcg_check_events()
-                * disable irqs, so make sure we are flushing stats atomically.
+                * Approximate root's usage from global state. This isn't
+                * perfect, but root's usage has always been an approximation.
                 */
-               if (in_task())
-                       mem_cgroup_flush_stats_atomic();
-               val = memcg_page_state(memcg, NR_FILE_PAGES) +
-                       memcg_page_state(memcg, NR_ANON_MAPPED);
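+               /*
+                * All file and anon pages in the system are charged to the
+                * root hierarchy, so the global counters stand in for its
+                * page state without an expensive rstat flush.
+                */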
+               val = global_node_page_state(NR_FILE_PAGES) +
+                       global_node_page_state(NR_ANON_MAPPED);
                if (swap)
-                       val += memcg_page_state(memcg, MEMCG_SWAP);
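+                       /* Swap in use: total slots minus free slots. */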
+                       val += total_swap_pages - get_nr_swap_pages();
        } else {
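+               /* Non-root usage is tracked exactly by the page counters. */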
                if (!swap)
                        val = page_counter_read(&memcg->memory);