long count[NR_VM_NODE_STAT_ITEMS];
 };
 
+/*
+ * Bitmap of shrinker::id values corresponding to memcg-aware
+ * shrinkers that have elements charged to this memcg. One map is
+ * allocated for every node.
+ */
+struct memcg_shrinker_map {
+       struct rcu_head rcu;
+       unsigned long map[];
+};
+
 /*
  * per-zone information in memory controller.
  */
 
        struct mem_cgroup_reclaim_iter  iter[DEF_PRIORITY + 1];
 
+#ifdef CONFIG_MEMCG_KMEM
+       struct memcg_shrinker_map __rcu *shrinker_map;
+#endif
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long           usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
        return memcg ? memcg->kmemcg_id : -1;
 }
 
+extern int memcg_expand_shrinker_maps(int new_id);
+
 #else
 #define for_each_memcg_cache_index(_idx)       \
        for (; NULL; )
 
 
 struct workqueue_struct *memcg_kmem_cache_wq;
 
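+/* Current size, in bytes, of each memcg's per-node shrinker map */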
+static int memcg_shrinker_map_size;
+static DEFINE_MUTEX(memcg_shrinker_map_mutex);
+
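+/* RCU callback freeing a replaced shrinker map after a grace period */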
+static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
+{
+       kvfree(container_of(head, struct memcg_shrinker_map, rcu));
+}
+
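+/*
+ * Replace each per-node map of @memcg with a @size-byte one. The old
+ * part of the new map is fully set, the newly added part is cleared.
+ */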
+static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
+                                        int size, int old_size)
+{
+       struct memcg_shrinker_map *new, *old;
+       int nid;
+
+       lockdep_assert_held(&memcg_shrinker_map_mutex);
+
+       for_each_node(nid) {
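+               /*
+                * All writers are serialized by memcg_shrinker_map_mutex
+                * (asserted above), so plain dereference is safe here.
+                */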
+               old = rcu_dereference_protected(
+                       mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
+               /*
+                * Not yet online memcg: its maps are allocated for all
+                * nodes at once, at online time, so they will already
+                * have the new size.
+                */
+               if (!old)
+                       return 0;
+
+               new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
+               if (!new)
+                       return -ENOMEM;
+
+               /*
+                * Set all old bits, clear all new bits. Setting the old
+                * part (rather than copying it) guarantees that a bit
+                * set in the old map concurrently with this expansion
+                * cannot be lost.
+                */
+               memset(new->map, 0xff, old_size);
+               memset((void *)new->map + old_size, 0, size - old_size);
+
+               rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
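+               /* Readers may still use the old map: free it via RCU */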
+               call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
+       }
+
+       return 0;
+}
+
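+/*
+ * Free the per-node maps. Only called when no reader can reach them
+ * anymore (allocation failure before the memcg goes online, or the
+ * css_free path), so kvfree() needs no RCU grace period here.
+ */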
+static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
+{
+       struct mem_cgroup_per_node *pn;
+       struct memcg_shrinker_map *map;
+       int nid;
+
+       if (mem_cgroup_is_root(memcg))
+               return;
+
+       for_each_node(nid) {
+               pn = mem_cgroup_nodeinfo(memcg, nid);
+               map = rcu_dereference_protected(pn->shrinker_map, true);
+               kvfree(map);
+               rcu_assign_pointer(pn->shrinker_map, NULL);
+       }
+}
+
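+/*
+ * Allocate an empty map of the current global size for each node of
+ * @memcg. Called when the memcg is brought online.
+ */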
+static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+       struct memcg_shrinker_map *map;
+       int nid, size, ret = 0;
+
+       if (mem_cgroup_is_root(memcg))
+               return 0;
+
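+       /* The mutex keeps memcg_shrinker_map_size stable while we allocate */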
+       mutex_lock(&memcg_shrinker_map_mutex);
+       size = memcg_shrinker_map_size;
+       for_each_node(nid) {
+               map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
+               if (!map) {
+                       memcg_free_shrinker_maps(memcg);
+                       ret = -ENOMEM;
+                       break;
+               }
+               rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
+       }
+       mutex_unlock(&memcg_shrinker_map_mutex);
+
+       return ret;
+}
+
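+/*
+ * Grow the maps of all memcgs so that bit @new_id fits. The expected
+ * caller (not part of this hunk) is the shrinker registration path,
+ * which must call this before the new shrinker id is used.
+ */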
+int memcg_expand_shrinker_maps(int new_id)
+{
+       int size, old_size, ret = 0;
+       struct mem_cgroup *memcg;
+
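+       /* Bitmap size in bytes, rounded up to a whole number of longs */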
+       size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
+       old_size = memcg_shrinker_map_size;
+       if (size <= old_size)
+               return 0;
+
+       mutex_lock(&memcg_shrinker_map_mutex);
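+       /* No memcg has been created yet: just record the new size */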
+       if (!root_mem_cgroup)
+               goto unlock;
+
+       for_each_mem_cgroup(memcg) {
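+               /* The root memcg never has shrinker maps allocated */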
+               if (mem_cgroup_is_root(memcg))
+                       continue;
+               ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
+               if (ret)
+                       goto unlock;
+       }
+unlock:
+       if (!ret)
+               memcg_shrinker_map_size = size;
+       mutex_unlock(&memcg_shrinker_map_mutex);
+       return ret;
+}
+#else /* CONFIG_MEMCG_KMEM */
+static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
+static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
 #endif /* CONFIG_MEMCG_KMEM */
 
 /**
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
+       /*
+        * A memcg must be visible to memcg_expand_shrinker_maps() by
+        * the time its maps are allocated, so we allocate the maps
+        * here, where for_each_mem_cgroup() can no longer skip it.
+        */
+       if (memcg_alloc_shrinker_maps(memcg)) {
+               mem_cgroup_id_remove(memcg);
+               return -ENOMEM;
+       }
+
        /* Online state pins memcg ID, memcg ID pins CSS */
        atomic_set(&memcg->id.ref, 1);
        css_get(css);
        vmpressure_cleanup(&memcg->vmpressure);
        cancel_work_sync(&memcg->high_work);
        mem_cgroup_remove_from_trees(memcg);
+       memcg_free_shrinker_maps(memcg);
        memcg_free_kmem(memcg);
        mem_cgroup_free(memcg);
 }