             zone;                                      \
             zone = next_zone(zone))
 
+#define for_each_populated_zone(zone)                  \
+       for (zone = (first_online_pgdat())->node_zones; \
+            zone;                                      \
+            zone = next_zone(zone))                    \
+               if (!populated_zone(zone))              \
+                       ; /* do nothing */              \
+               else
+
 static inline struct zone *zonelist_zone(struct zoneref *zoneref)
 {
        return zoneref->zone;
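
A minimal sketch of how the new for_each_populated_zone() helper behaves (illustrative expansion only; the loop body shown is a stand-in for whatever statement a caller attaches):

	for (zone = (first_online_pgdat())->node_zones;
	     zone;
	     zone = next_zone(zone))
		if (!populated_zone(zone))
			;	/* unpopulated zone: fall through to the next one */
		else {
			/* caller's statement runs here, for populated zones only */
		}

The trailing "if (...) ; else" lets the macro be used as a plain statement prefix: the caller's body binds to the dangling else, so the explicit "if (!populated_zone(zone)) continue;" checks removed in the hunks below are no longer needed.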
 
 
        INIT_LIST_HEAD(list);
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;
 
-               if (!populated_zone(zone))
-                       continue;
-
                zone_start = zone->zone_start_pfn;
                zone_end = zone->zone_start_pfn + zone->spanned_pages;
 
        struct zone *zone;
        unsigned int cnt = 0;
 
-       for_each_zone(zone)
-               if (populated_zone(zone) && is_highmem(zone))
+       for_each_populated_zone(zone)
+               if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);
 
        return cnt;
 
                size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
                tmp = size;
                size += highmem_size;
-               for_each_zone (zone)
-                       if (populated_zone(zone)) {
-                               tmp += snapshot_additional_pages(zone);
-                               if (is_highmem(zone)) {
-                                       highmem_size -=
+               for_each_populated_zone(zone) {
+                       tmp += snapshot_additional_pages(zone);
+                       if (is_highmem(zone)) {
+                               highmem_size -=
                                        zone_page_state(zone, NR_FREE_PAGES);
-                               } else {
-                                       tmp -= zone_page_state(zone, NR_FREE_PAGES);
-                                       tmp += zone->lowmem_reserve[ZONE_NORMAL];
-                               }
+                       } else {
+                               tmp -= zone_page_state(zone, NR_FREE_PAGES);
+                               tmp += zone->lowmem_reserve[ZONE_NORMAL];
                        }
+               }
 
                if (highmem_size < 0)
                        highmem_size = 0;
 
        unsigned long flags;
        struct zone *zone;
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;
 
-               if (!populated_zone(zone))
-                       continue;
-
                pset = zone_pcp(zone, cpu);
 
                pcp = &pset->pcp;
        int cpu;
        struct zone *zone;
 
-       for_each_zone(zone) {
-               if (!populated_zone(zone))
-                       continue;
-
+       for_each_populated_zone(zone) {
                show_node(zone);
                printk("%s per-cpu:\n", zone->name);
 
                global_page_state(NR_PAGETABLE),
                global_page_state(NR_BOUNCE));
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                int i;
 
-               if (!populated_zone(zone))
-                       continue;
-
                show_node(zone);
                printk("%s"
                        " free:%lukB"
                printk("\n");
        }
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
-               if (!populated_zone(zone))
-                       continue;
-
                show_node(zone);
                printk("%s: ", zone->name);
 
 
        node_set_state(node, N_CPU);    /* this node has a cpu */
 
-       for_each_zone(zone) {
-
-               if (!populated_zone(zone))
-                       continue;
-
+       for_each_populated_zone(zone) {
                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
                                         GFP_KERNEL, node);
                if (!zone_pcp(zone, cpu))
 
        struct zone *zone;
        unsigned long ret = 0;
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                enum lru_list l;
 
-               if (!populated_zone(zone))
-                       continue;
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
 
        int cpu;
        int threshold;
 
-       for_each_zone(zone) {
-
-               if (!zone->present_pages)
-                       continue;
-
+       for_each_populated_zone(zone) {
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;
 
-               if (!populated_zone(zone))
-                       continue;
-
                p = zone_pcp(zone, cpu);
 
                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)