       return shrink_inactive_list(nr_to_scan, mz, sc, priority, lru);
 }
 
-static int vmscan_swappiness(struct mem_cgroup_zone *mz,
-                            struct scan_control *sc)
+static int vmscan_swappiness(struct scan_control *sc)
 {
        if (global_reclaim(sc))
                return vm_swappiness;
-       return mem_cgroup_swappiness(mz->mem_cgroup);
+       return mem_cgroup_swappiness(sc->target_mem_cgroup);
 }
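
For reference, a compilable userspace sketch of the resulting helper (the struct layouts, stub bodies, and the vm_swappiness default are stand-in assumptions; only vmscan_swappiness() itself mirrors the patched code). It shows the two cases the helper distinguishes: global reclaim uses the system-wide vm_swappiness, while targeted reclaim now reads the swappiness of the memcg that triggered reclaim, sc->target_mem_cgroup, rather than the per-zone mz->mem_cgroup.

/* Minimal userspace model of the refactor above. Types and values are
 * stand-ins; only vmscan_swappiness() mirrors the patched kernel code. */
#include <stdio.h>

struct mem_cgroup {
        int swappiness;
};

struct scan_control {
        struct mem_cgroup *target_mem_cgroup;   /* NULL for global reclaim */
};

static int vm_swappiness = 60;  /* default /proc/sys/vm/swappiness */

static int global_reclaim(struct scan_control *sc)
{
        return sc->target_mem_cgroup == NULL;
}

static int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        return memcg->swappiness;
}

static int vmscan_swappiness(struct scan_control *sc)
{
        if (global_reclaim(sc))
                return vm_swappiness;
        return mem_cgroup_swappiness(sc->target_mem_cgroup);
}

int main(void)
{
        struct mem_cgroup memcg = { .swappiness = 10 };
        struct scan_control global = { .target_mem_cgroup = NULL };
        struct scan_control targeted = { .target_mem_cgroup = &memcg };

        printf("global: %d, targeted: %d\n",
               vmscan_swappiness(&global), vmscan_swappiness(&targeted));
        return 0;
}

Note that every mem_cgroup_zone visited during one reclaim pass hangs off the same scan_control, so the mz argument carried no information the helper could not get from sc itself.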
 
       /*
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = vmscan_swappiness(mz, sc);
-       file_prio = 200 - vmscan_swappiness(mz, sc);
+       anon_prio = vmscan_swappiness(sc);
+       file_prio = 200 - vmscan_swappiness(sc);
 
        /*
         * OK, so we have swap space and a fair amount of page cache
                unsigned long scan;
 
                scan = zone_nr_lru_pages(mz, lru);
-               if (priority || noswap || !vmscan_swappiness(mz, sc)) {
+               if (priority || noswap || !vmscan_swappiness(sc)) {
                        scan >>= priority;
                        if (!scan && force_scan)
                                scan = SWAP_CLUSTER_MAX;