}
 
 /**
- * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
+ * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
+ * @node: node of the wanted lruvec
  * @zone: zone of the wanted lruvec
  * @memcg: memcg of the wanted lruvec
  *
- * Returns the lru list vector holding pages for the given @zone and
- * @mem.  This can be the global zone lruvec, if the memory controller
+ * Returns the lru list vector holding pages for a given @node or a given
+ * @memcg and @zone. This can be the node lruvec, if the memory controller
  * is disabled.
  */
-static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
-                                                   struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
+                               struct zone *zone, struct mem_cgroup *memcg)
 {
        struct mem_cgroup_per_zone *mz;
        struct lruvec *lruvec;
 
        if (mem_cgroup_disabled()) {
-               lruvec = zone_lruvec(zone);
+               lruvec = node_lruvec(pgdat);
                goto out;
        }
 
 {
 }
 
-static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
-                                                   struct mem_cgroup *memcg)
+/*
+ * NOTE(review): presumably the !CONFIG_MEMCG stub — TODO confirm against the
+ * surrounding #ifdef block. @zone and @memcg are ignored here; every page is
+ * kept on the node-wide lruvec returned by node_lruvec().
+ */
+static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
+                               struct zone *zone, struct mem_cgroup *memcg)
 {
-       return zone_lruvec(zone);
+       return node_lruvec(pgdat);
 }
 
 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
 /*
- * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
+ * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
+static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
                              struct scan_control *sc, unsigned long *lru_pages)
 {
-       struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+       struct zone *zone = &pgdat->node_zones[sc->reclaim_idx];
+       struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, zone, memcg);
        unsigned long nr[NR_LRU_LISTS];
        unsigned long targets[NR_LRU_LISTS];
        unsigned long nr_to_scan;
  * calls try_to_compact_zone() that it will have enough free pages to succeed.
  * It will give up earlier than that if there is difficulty reclaiming pages.
  */
-static inline bool should_continue_reclaim(struct zone *zone,
+static inline bool should_continue_reclaim(struct pglist_data *pgdat,
                                        unsigned long nr_reclaimed,
                                        unsigned long nr_scanned,
                                        struct scan_control *sc)
 {
        unsigned long pages_for_compaction;
        unsigned long inactive_lru_pages;
+       int z;
 
        /* If not in reclaim/compaction mode, stop */
        if (!in_reclaim_compaction(sc))
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE);
+       inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
        if (get_nr_swap_pages() > 0)
-               inactive_lru_pages += node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
+               inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
 
        /* If compaction would go ahead or the allocation would succeed, stop */
-       switch (compaction_suitable(zone, sc->order, 0, 0)) {
-       case COMPACT_PARTIAL:
-       case COMPACT_CONTINUE:
-               return false;
-       default:
-               return true;
+       for (z = 0; z <= sc->reclaim_idx; z++) {
+               struct zone *zone = &pgdat->node_zones[z];
+               if (!populated_zone(zone))
+                       continue;
+
+               switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
+               case COMPACT_PARTIAL:
+               case COMPACT_CONTINUE:
+                       return false;
+               default:
+                       /* check next zone */
+                       ;
+               }
        }
+       return true;
 }
 
 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long nr_reclaimed, nr_scanned;
        bool reclaimable = false;
-       struct zone *zone = &pgdat->node_zones[classzone_idx];
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
                struct mem_cgroup_reclaim_cookie reclaim = {
-                       .zone = zone,
+                       .zone = &pgdat->node_zones[classzone_idx],
                        .priority = sc->priority,
                };
-               unsigned long zone_lru_pages = 0;
+               unsigned long node_lru_pages = 0;
                struct mem_cgroup *memcg;
 
                nr_reclaimed = sc->nr_reclaimed;
                        reclaimed = sc->nr_reclaimed;
                        scanned = sc->nr_scanned;
 
-                       shrink_zone_memcg(zone, memcg, sc, &lru_pages);
-                       zone_lru_pages += lru_pages;
+                       shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
+                       node_lru_pages += lru_pages;
 
                        if (!global_reclaim(sc))
-                               shrink_slab(sc->gfp_mask, zone_to_nid(zone),
+                               shrink_slab(sc->gfp_mask, pgdat->node_id,
                                            memcg, sc->nr_scanned - scanned,
                                            lru_pages);
 
                        /*
                         * Direct reclaim and kswapd have to scan all memory
                         * cgroups to fulfill the overall scan target for the
-                        * zone.
+                        * node.
                         *
                         * Limit reclaim, on the other hand, only cares about
                         * nr_to_reclaim pages to be reclaimed and it will
                 * the eligible LRU pages were scanned.
                 */
                if (global_reclaim(sc))
-                       shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
+                       shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
                                    sc->nr_scanned - nr_scanned,
-                                   zone_lru_pages);
+                                   node_lru_pages);
 
                if (reclaim_state) {
                        sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                if (sc->nr_reclaimed - nr_reclaimed)
                        reclaimable = true;
 
-       } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
+       } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
                                         sc->nr_scanned - nr_scanned, sc));
 
        return reclaimable;
 
 #ifdef CONFIG_MEMCG
 
-unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
+unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
                                                gfp_t gfp_mask, bool noswap,
                                                struct zone *zone,
                                                unsigned long *nr_scanned)
        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
-        * if we don't reclaim here, the shrink_zone from balance_pgdat
+        * if we don't reclaim here, the shrink_node from balance_pgdat
         * will pick up pages from other mem cgroup's as well. We hack
         * the priority and make it zero.
         */
-       shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
+       shrink_node_memcg(zone->zone_pgdat, memcg, &sc, &lru_pages);
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
 
        memcg = mem_cgroup_iter(NULL, NULL, NULL);
        do {
-               struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+               struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, zone, memcg);
 
                if (inactive_list_is_low(lruvec, false))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,