void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                int nr_pages);
 
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                          int nid, unsigned int lru_mask);
+
 static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        return 0;
 }
 
+static inline unsigned long
+mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                            int nid, unsigned int lru_mask)
+{
+       return 0;
+}
+
 static inline void
 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
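
With memcg compiled out, the new stub above returns 0 and is in fact dead
code: memcg_kmem_enabled() is itself a constant-false stub in that
configuration, so count_shadow_nodes() below always takes the
node_page_state() fallback. For reference, the definition it pairs with
looks roughly like this (recalled from the memcontrol.h of this era, not
part of the patch):

	static inline bool memcg_kmem_enabled(void)
	{
		/* !CONFIG_MEMCG: kmem accounting is never active */
		return false;
	}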
 
        __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 }
 
-static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-                                                 int nid,
-                                                 unsigned int lru_mask)
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                          int nid, unsigned int lru_mask)
 {
        unsigned long nr = 0;
        int zid;
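
The hunk cuts this helper off here; its body goes on to sum the per-zone
LRU counters for the requested node, roughly as follows (recalled from
the mm/memcontrol.c of this era; the patch itself only drops the static
qualifier and reflows the signature):

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr += mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, lru_mask);
	return nr;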
 
        shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
        local_irq_enable();
 
-       pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
-               node_page_state(sc->nid, NR_INACTIVE_FILE);
+       if (memcg_kmem_enabled())
+               pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+                                                    LRU_ALL_FILE);
+       else
+               pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
+                       node_page_state(sc->nid, NR_INACTIVE_FILE);
 
        /*
         * Active cache pages are limited to 50% of memory, and shadow
         * entries that represent a refault distance bigger than that
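
Note what the new branch in count_shadow_nodes() buys: with kmem
accounting enabled, the "pages" ceiling is now derived from the file
cache of the cgroup being shrunk (sc->memcg) rather than from the
node-wide counters, so each cgroup's shadow entries are bounded by its
own file cache size.
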
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = DEFAULT_SEEKS,
-       .flags = SHRINKER_NUMA_AWARE,
+       .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 };
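
SHRINKER_MEMCG_AWARE makes shrink_slab() invoke
count_shadow_nodes()/scan_shadow_nodes() once per memory cgroup, with
the target cgroup passed in sc->memcg. For those per-cgroup counts to be
meaningful, the shadow-node list_lru itself has to be created memcg
aware; the same patch series switches workingset_init() over to
__list_lru_init(), along these lines (a sketch; the init hunk is not
shown above):

	ret = __list_lru_init(&workingset_shadow_nodes, true,	/* memcg aware */
			      &shadow_nodes_key);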
 
 /*