}
 
 /*
- * Returns false when the remaining initialisation should be deferred until
+ * Returns true when the remaining initialisation should be deferred until
  * later in the boot cycle when it can be parallelised.
  */
-static inline bool update_defer_init(pg_data_t *pgdat,
-                               unsigned long pfn, unsigned long zone_end,
-                               unsigned long *nr_initialised)
+static bool __meminit
+defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 {
+       static unsigned long prev_end_pfn, nr_initialised;
+
+       /*
+        * prev_end_pfn is a static that holds the end of the previous zone.
+        * No locking is needed: this is called very early in boot, before
+        * smp_init().
+        */
+       if (prev_end_pfn != end_pfn) {
+               prev_end_pfn = end_pfn;
+               nr_initialised = 0;
+       }
+
        /* Always populate low zones for address-constrained allocations */
-       if (zone_end < pgdat_end_pfn(pgdat))
-               return true;
-       (*nr_initialised)++;
-       if ((*nr_initialised > pgdat->static_init_pgcnt) &&
-           (pfn & (PAGES_PER_SECTION - 1)) == 0) {
-               pgdat->first_deferred_pfn = pfn;
+       if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
                return false;
+       nr_initialised++;
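+       /*
+        * Once the minimum number of pages for this node has been
+        * initialised, defer the rest from the next section-aligned pfn
+        * onwards.
+        */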
+       if ((nr_initialised > NODE_DATA(nid)->static_init_pgcnt) &&
+           (pfn & (PAGES_PER_SECTION - 1)) == 0) {
+               NODE_DATA(nid)->first_deferred_pfn = pfn;
+               return true;
        }
-
-       return true;
+       return false;
 }
 #else
 static inline bool early_page_uninitialised(unsigned long pfn)
 {
        return false;
 }
 
-static inline bool update_defer_init(pg_data_t *pgdat,
-                               unsigned long pfn, unsigned long zone_end,
-                               unsigned long *nr_initialised)
+static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 {
-       return true;
+       return false;
 }
 #endif
 
                struct vmem_altmap *altmap)
 {
        unsigned long end_pfn = start_pfn + size;
-       pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long pfn;
-       unsigned long nr_initialised = 0;
        struct page *page;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        struct memblock_region *r = NULL, *tmp;
                        continue;
                if (!early_pfn_in_nid(pfn, nid))
                        continue;
-               if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
-                       break;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                /*
                        }
                }
 #endif
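+               /*
+                * Check for deferral only after mirrored memory has been
+                * skipped, so skipped pages do not count towards
+                * nr_initialised.
+                */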
+               if (defer_init(nid, pfn, end_pfn))
+                       break;
 
 not_early:
                page = pfn_to_page(pfn);