 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
                                    struct zone *zone);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
-                                       int nid, int zid, unsigned int lrumask);
+unsigned long mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list);
 struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 }
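
The new helper takes the lruvec itself, so a caller no longer passes a
node/zone-index pair plus an LRU bitmask just to read one counter; where
the old lru_mask could sum several lists per call, the new signature reads
exactly one enum lru_list. The memcontrol.c side is not part of this
excerpt, but it plausibly reduces to a container_of() plus an array read;
a minimal sketch, assuming mem_cgroup_per_zone still embeds the lruvec
and keeps per-list page counts in an lru_size[] array (both names are
assumptions here, not shown by this diff):

        unsigned long
        mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
        {
                struct mem_cgroup_per_zone *mz;

                /*
                 * The lruvec handed out by mem_cgroup_zone_lruvec() is
                 * assumed to be embedded in its mem_cgroup_per_zone.
                 */
                mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
                return mz->lru_size[lru];
        }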
 
 static inline unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
-                               unsigned int lru_mask)
+mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
        return 0;
 }
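
With the controller compiled out, this stub exists only so call sites keep
building; it is also dead at run time, since the one caller added below
tests mem_cgroup_disabled() before reaching it.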
 
        return &mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup)->reclaim_stat;
 }
 
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
-                                      enum lru_list lru)
+static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
        if (!mem_cgroup_disabled())
-               return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
-                                                   zone_to_nid(mz->zone),
-                                                   zone_idx(mz->zone),
-                                                   BIT(lru));
+               return mem_cgroup_get_lruvec_size(lruvec, lru);
 
-       return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+       return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
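
get_lruvec_size() keeps the existing split: per-memcg accounting when the
controller is active, the global zone vmstat counters otherwise. The
fallback path needs a way back from the lruvec to its zone, which
lruvec_zone() provides; its exact definition depends on the tree this
patch applies to, but since mem_cgroup_zone_lruvec() hands back the lruvec
embedded in struct zone whenever the controller is disabled, a
container_of() is the natural shape (a sketch, not necessarily the
definition in this tree):

        static inline struct zone *lruvec_zone(struct lruvec *lruvec)
        {
                /* with memcg disabled, each zone embeds exactly one lruvec */
                return container_of(lruvec, struct zone, lruvec);
        }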
 
-
 /*
  * Add a shrinker callback to be called from the vm
  */
        enum lru_list lru;
        int noswap = 0;
        bool force_scan = false;
+       struct lruvec *lruvec;
+
+       lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 
        /*
         * If the zone or memcg is small, nr[l] can be 0.  This
                goto out;
        }
 
-       anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+       anon  = get_lruvec_size(lruvec, LRU_ACTIVE_ANON) +
+               get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
+       file  = get_lruvec_size(lruvec, LRU_ACTIVE_FILE) +
+               get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 
        if (global_reclaim(sc)) {
                free  = zone_page_state(mz->zone, NR_FREE_PAGES);
                int file = is_file_lru(lru);
                unsigned long scan;
 
-               scan = zone_nr_lru_pages(mz, lru);
+               scan = get_lruvec_size(lruvec, lru);
                if (sc->priority || noswap || !vmscan_swappiness(sc)) {
                        scan >>= sc->priority;
                        if (!scan && force_scan)
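
The shift keeps scan pressure proportional to reclaim priority: at
DEF_PRIORITY (12 in mainline), an LRU holding 2^20 pages contributes
scan = 2^20 >> 12 = 256 pages this pass, while any list under 4096 pages
rounds down to zero. That is the case the force_scan test above exists
for, bumping such lists back up to a minimum batch (SWAP_CLUSTER_MAX in
trees of this vintage) so small memcgs still get scanned.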
 {
        unsigned long pages_for_compaction;
        unsigned long inactive_lru_pages;
+       struct lruvec *lruvec;
 
        /* If not in reclaim/compaction mode, stop */
        if (!in_reclaim_compaction(sc))
         * If we have not reclaimed enough pages for compaction and the
         * inactive lists are large enough, continue reclaiming
         */
+       lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+       inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
        if (nr_swap_pages > 0)
-               inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+               inactive_lru_pages += get_lruvec_size(lruvec,
+                                                     LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
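
The threshold arithmetic: pages_for_compaction = 2UL << sc->order is twice
the requested block size, on the reasoning that compaction needs roughly
that much reclaimed space to assemble an order-N page. For an order-9
request (x86-64 THP on 4K pages) that is 1024 pages, i.e. 4MiB; reclaim
continues while fewer pages than that have been reclaimed and the inactive
pages still available to reclaim (file, plus anon only when swap is
present) exceed it.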