extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                unsigned int swappiness,
-                                               struct zone *zone,
-                                               int nid);
+                                               struct zone *zone);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 
                /* we use swappiness of local cgroup */
                if (check_soft)
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
-                               noswap, get_swappiness(victim), zone,
-                               zone->zone_pgdat->node_id);
+                               noswap, get_swappiness(victim), zone);
                else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
                                                noswap, get_swappiness(victim));
 
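Note (not part of the patch): the nid argument is redundant because the node is already derivable from the zone, which is exactly what the old call site open-codes as zone->zone_pgdat->node_id. Since mem_cgroup_shrink_node_zone() reclaims from the single zone it is handed, the sc.nodemask built from that node is never consulted, so both can go. A minimal illustrative sketch of the relationship the change relies on, using a hypothetical helper name (the in-tree equivalent is zone_to_nid()):

	/* sketch only: the node id is recoverable from the zone itself */
	static inline int zone_node_id(struct zone *zone)
	{
		return zone->zone_pgdat->node_id;	/* same value zone_to_nid(zone) returns */
	}
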
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                unsigned int swappiness,
-                                               struct zone *zone, int nid)
+                                               struct zone *zone)
 {
        struct scan_control sc = {
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .order = 0,
                .mem_cgroup = mem,
        };
-       nodemask_t nm  = nodemask_of_node(nid);
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
-       sc.nodemask = &nm;
 
        trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
                                                      sc.may_writepage,