       mod_lruvec_page_state(page, idx, -1);
 }
 
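+/*
+ * Return the lruvec of @lruvec's parent memcg on the same node, or
+ * NULL when there is no parent (cgroups disabled, or @lruvec already
+ * at the root of the hierarchy).
+ */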
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
+{
+       struct mem_cgroup *memcg;
+
+       memcg = lruvec_memcg(lruvec);
+       if (!memcg)
+               return NULL;
+       memcg = parent_mem_cgroup(memcg);
+       if (!memcg)
+               return NULL;
+       return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
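
For illustration, the new parent_lruvec() helper returns NULL at the top of
the cgroup hierarchy, so callers can walk upward in a simple loop. A minimal
sketch (a hypothetical helper, not part of this patch):

        static int lruvec_depth(struct lruvec *lruvec)
        {
                int depth = 0;

                while ((lruvec = parent_lruvec(lruvec)))
                        depth++;
                return depth;
        }

lru_note_cost() below relies on the same pattern: its do/while applies the
cost accounting at every level, from the page's own lruvec up to the root.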
 
 void lru_note_cost(struct page *page)
 {
        struct lruvec *lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
 
-       /* Record new data point */
-       if (page_is_file_lru(page))
-               lruvec->file_cost++;
-       else
-               lruvec->anon_cost++;
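+       /* Propagate the cost event up the memcg hierarchy */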
+       do {
+               unsigned long lrusize;
+
+               /* Record cost event */
+               if (page_is_file_lru(page))
+                       lruvec->file_cost++;
+               else
+                       lruvec->anon_cost++;
+
+               /*
+                * Decay previous events
+                *
+                * Because workloads change over time (and to avoid
+                * overflow) we keep these statistics as a floating
+                * average, which ends up weighing recent refaults
+                * more than old ones.
+                */
+               lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
+                         lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
+                         lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
+                         lruvec_page_state(lruvec, NR_ACTIVE_FILE);
+
+               if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
+                       lruvec->file_cost /= 2;
+                       lruvec->anon_cost /= 2;
+               }
+       } while ((lruvec = parent_lruvec(lruvec)));
 }
 
 static void __activate_page(struct page *page, struct lruvec *lruvec,
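
To see what the decay clause does, here is a standalone userspace sketch of
the same rule, with a made-up lrusize (in the kernel it is the sum of the
four LRU page counts above). Once the combined cost exceeds a quarter of the
LRU size, both counters are halved, so the sums behave like an exponentially
decaying average in which recent events dominate:

        #include <stdio.h>

        int main(void)
        {
                unsigned long lrusize = 1000;   /* stand-in for the LRU counts */
                unsigned long file_cost = 0, anon_cost = 0;

                for (int i = 0; i < 600; i++) {
                        file_cost++;            /* one file cost event */
                        if (file_cost + anon_cost > lrusize / 4) {
                                file_cost /= 2; /* decay both counters */
                                anon_cost /= 2;
                        }
                }
                /* after every event, file_cost + anon_cost <= lrusize / 4 */
                printf("file_cost=%lu anon_cost=%lu\n", file_cost, anon_cost);
                return 0;
        }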
 
         */
        struct mem_cgroup *target_mem_cgroup;
 
+       /*
+        * Scan pressure balancing between anon and file LRUs
+        */
+       unsigned long   anon_cost;
+       unsigned long   file_cost;
+
        /* Can active pages be deactivated as part of reclaim? */
 #define DEACTIVATE_ANON 1
 #define DEACTIVATE_FILE 2
        int swappiness = mem_cgroup_swappiness(memcg);
        u64 fraction[2];
        u64 denominator = 0;    /* gcc */
-       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
        unsigned long anon_prio, file_prio;
        enum scan_balance scan_balance;
-       unsigned long anon, file;
        unsigned long totalcost;
        unsigned long ap, fp;
        enum lru_list lru;
        }
 
        scan_balance = SCAN_FRACT;
-
        /*
         * Calculate the pressure balance between anon and file pages.
         *
        anon_prio = swappiness;
        file_prio = 200 - anon_prio;
 
-       /*
-        * Because workloads change over time (and to avoid overflow)
-        * we keep these statistics as a floating average, which ends
-        * up weighing recent refaults more than old ones.
-        */
-
-       anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
-               lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
-       file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
-               lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
-
-       spin_lock_irq(&pgdat->lru_lock);
-       totalcost = lruvec->anon_cost + lruvec->file_cost;
-       if (unlikely(totalcost > (anon + file) / 4)) {
-               lruvec->anon_cost /= 2;
-               lruvec->file_cost /= 2;
-               totalcost /= 2;
-       }
+       totalcost = sc->anon_cost + sc->file_cost;
        ap = anon_prio * (totalcost + 1);
-       ap /= lruvec->anon_cost + 1;
+       ap /= sc->anon_cost + 1;
 
        fp = file_prio * (totalcost + 1);
-       fp /= lruvec->file_cost + 1;
-       spin_unlock_irq(&pgdat->lru_lock);
+       fp /= sc->file_cost + 1;
 
        fraction[0] = ap;
        fraction[1] = fp;
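
A quick standalone check with hypothetical costs shows where the balance
lands. With the default swappiness of 60, and anon refaulting three times as
hard as file (say sc->anon_cost = 300, sc->file_cost = 100):

        #include <stdio.h>

        int main(void)
        {
                unsigned long anon_prio = 60, file_prio = 200 - 60;
                unsigned long anon_cost = 300, file_cost = 100; /* made up */
                unsigned long totalcost = anon_cost + file_cost;
                unsigned long ap = anon_prio * (totalcost + 1) / (anon_cost + 1);
                unsigned long fp = file_prio * (totalcost + 1) / (file_cost + 1);

                /* prints ap=79 fp=555: the cheaper file LRU draws ~7x the pressure */
                printf("ap=%lu fp=%lu\n", ap, fp);
                return 0;
        }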
        nr_reclaimed = sc->nr_reclaimed;
        nr_scanned = sc->nr_scanned;
 
+       /*
+        * Determine the scan balance between anon and file LRUs.
+        */
+       spin_lock_irq(&pgdat->lru_lock);
+       sc->anon_cost = target_lruvec->anon_cost;
+       sc->file_cost = target_lruvec->file_cost;
+       spin_unlock_irq(&pgdat->lru_lock);
+
        /*
         * Target desirable inactive:active list ratios for the anon
         * and file LRU lists.