#ifdef CONFIG_HIGHMEM
        unsigned long tmp;
+
+       /*
+        * Explicitly reset zone->managed_pages because highmem pages are
+        * freed before free_all_bootmem_node() is called.
+        */
+       reset_all_zones_managed_pages();
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
                free_highmem_page(pfn_to_page(tmp));
        num_physpages += totalhigh_pages;
 
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/swap.h> /* for totalram_pages */
+#include <linux/bootmem.h> /* for reset_all_zones_managed_pages() */
 
 void *kmap(struct page *page)
 {
        struct zone *zone;
        int nid;
 
+       /*
+        * Explicitly reset zone->managed_pages because set_highmem_pages_init()
+        * is invoked before free_all_bootmem().
+        */
+       reset_all_zones_managed_pages();
        for_each_zone(zone) {
                unsigned long zone_start_pfn, zone_end_pfn;
 
 
 
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
+extern void reset_all_zones_managed_pages(void);
 
 extern void free_bootmem_node(pg_data_t *pgdat,
                              unsigned long addr,
 
        return count;
 }
 
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
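+/* Guard so that zone->managed_pages is reset only once at boot */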
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       /*
-        * In free_area_init_core(), highmem zone's managed_pages is set to
-        * present_pages, and bootmem allocator doesn't allocate from highmem
-        * zones. So there's no need to recalculate managed_pages because all
-        * highmem pages will be managed by the buddy system. Here highmem
-        * zone also includes highmem movable zone.
-        */
+       if (reset_managed_pages_done)
+               return;
+
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-               if (!is_highmem(z))
-                       z->managed_pages = 0;
+               z->managed_pages = 0;
+}
+
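+/* Zero zone->managed_pages for all zones of all online nodes */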
+void __init reset_all_zones_managed_pages(void)
+{
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_managed_pages(pgdat);
+       reset_managed_pages_done = 1;
 }
 
 /**
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
-       reset_node_lowmem_managed_pages(pgdat);
+       reset_node_managed_pages(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
 }
 
 {
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
-       struct pglist_data *pgdat;
 
-       for_each_online_pgdat(pgdat)
-               reset_node_lowmem_managed_pages(pgdat);
+       reset_all_zones_managed_pages();
 
        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);
 
        return count;
 }
 
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
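+/* Guard so that zone->managed_pages is reset only once at boot */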
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       /*
-        * In free_area_init_core(), highmem zone's managed_pages is set to
-        * present_pages, and bootmem allocator doesn't allocate from highmem
-        * zones. So there's no need to recalculate managed_pages because all
-        * highmem pages will be managed by the buddy system. Here highmem
-        * zone also includes highmem movable zone.
-        */
+       if (reset_managed_pages_done)
+               return;
+
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-               if (!is_highmem(z))
-                       z->managed_pages = 0;
+               z->managed_pages = 0;
+}
+
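+/* Zero zone->managed_pages for all zones of all online nodes */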
+void __init reset_all_zones_managed_pages(void)
+{
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_managed_pages(pgdat);
+       reset_managed_pages_done = 1;
 }
 
 /**
  */
 unsigned long __init free_all_bootmem(void)
 {
-       struct pglist_data *pgdat;
-
-       for_each_online_pgdat(pgdat)
-               reset_node_lowmem_managed_pages(pgdat);
+       reset_all_zones_managed_pages();
 
        /*
         * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
 
 {
        __free_reserved_page(page);
        totalram_pages++;
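+       /* The freed highmem page is now managed by the buddy allocator */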
+       page_zone(page)->managed_pages++;
        totalhigh_pages++;
 }
 #endif