                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
 
-                       lru_pages += zone_lru_pages(zone);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
        }
 
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
-                       lru_pages += zone_lru_pages(zone);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
 
                /*
                        if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                               (zone_lru_pages(zone) * 6))
+                                       (zone_reclaimable_pages(zone) * 6))
                                        zone_set_flag(zone,
                                                      ZONE_ALL_UNRECLAIMABLE);
                        /*
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count should be reasonably accurate.
+ * Pages that are less readily reclaimable include:
+ * - mlocked pages, which will be moved to the unevictable list when encountered
+ * - mapped pages, which may require several passes before they are reclaimed
+ * - dirty pages, which are not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
 {
-       return global_page_state(NR_ACTIVE_ANON)
-               + global_page_state(NR_ACTIVE_FILE)
-               + global_page_state(NR_INACTIVE_ANON)
-               + global_page_state(NR_INACTIVE_FILE);
+       unsigned long nr;
+
+       nr = global_page_state(NR_ACTIVE_FILE) +
+            global_page_state(NR_INACTIVE_FILE);
+
+       if (nr_swap_pages > 0)
+               nr += global_page_state(NR_ACTIVE_ANON) +
+                     global_page_state(NR_INACTIVE_ANON);
+
+       return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+       unsigned long nr;
+
+       nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+            zone_page_state(zone, NR_INACTIVE_FILE);
+
+       if (nr_swap_pages > 0)
+               nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+                     zone_page_state(zone, NR_INACTIVE_ANON);
+
+       return nr;
 }
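
For context (not part of the patch): the ZONE_ALL_UNRECLAIMABLE test in the kswapd hunk above compares zone->pages_scanned against six times the reclaimable count. A minimal sketch of that heuristic as a predicate, assuming the field and helper names used in this patch (the wrapper name itself is hypothetical):

/*
 * Illustrative only: mirrors the "pages_scanned >= 6 * reclaimable"
 * heuristic shown above; not part of this patch.
 */
static inline int zone_scan_pressure_ok(struct zone *zone)
{
	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}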
 
 #ifdef CONFIG_HIBERNATION
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = global_lru_pages();
+       lru_pages = global_reclaimable_pages();
        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
 
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
-                                       global_lru_pages());
+                                   global_reclaimable_pages());
                        sc.nr_reclaimed += reclaim_state.reclaimed_slab;
                        if (sc.nr_reclaimed >= nr_pages)
                                goto out;
        if (!sc.nr_reclaimed) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
+                       shrink_slab(nr_pages, sc.gfp_mask,
+                                   global_reclaimable_pages());
                        sc.nr_reclaimed += reclaim_state.reclaimed_slab;
                } while (sc.nr_reclaimed < nr_pages &&
                                reclaim_state.reclaimed_slab > 0);
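
A side note, outside the patch: the third argument to shrink_slab() acts as the denominator that scales slab scanning against LRU scanning, which is why it is switched to global_reclaimable_pages() here. A simplified sketch of that proportionality, with hypothetical names and not the kernel's actual formula:

/*
 * Hypothetical sketch: slab objects are scanned roughly in the same
 * ratio as LRU pages were scanned.  Simplified; for illustration only.
 */
static unsigned long slab_scan_target(unsigned long scanned,
				      unsigned long lru_pages,
				      unsigned long slab_objects)
{
	return slab_objects * scanned / (lru_pages + 1);
}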