__ClearPageReserved(p);
        set_page_count(p, 0);
 
-       page_zone(page)->managed_pages += nr_pages;
+       atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
        set_page_refcounted(page);
        __free_pages(page, order);
 }
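
For reference, the zone_managed_pages() reads introduced below go through a helper that is assumed to be a thin wrapper around atomic_long_read(), along the lines of the accessor added in this series to include/linux/mmzone.h (sketch, not part of this hunk):

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	/* Lockless read of the now-atomic counter. */
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}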
         * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
         * Check is race-prone but harmless.
         */
-       max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
+       max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
        if (zone->nr_reserved_highatomic >= max_managed)
                return;
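
As a worked example of this cap, assume a hypothetical 4 GiB zone of 4 KiB pages with 2 MiB pageblocks (numbers invented, not taken from the patch):

static unsigned long example_highatomic_cap(void)
{
	unsigned long managed   = 1048576;	/* zone_managed_pages(zone)	*/
	unsigned long pageblock = 512;		/* pageblock_nr_pages		*/

	/* Roughly 1% of the zone plus one pageblock: 10485 + 512 = 10997. */
	return managed / 100 + pageblock;
}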
 
        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 
        for_each_zone_zonelist(zone, z, zonelist, offset) {
-               unsigned long size = zone->managed_pages;
+               unsigned long size = zone_managed_pages(zone);
                unsigned long high = high_wmark_pages(zone);
                if (size > high)
                        sum += size - high;
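
The loop above adds up, for every zone on the zonelist, whatever it manages beyond its high watermark; a minimal model of the per-zone contribution (illustrative only):

static unsigned long zone_surplus(unsigned long managed, unsigned long high)
{
	/* Pages available above the high watermark, never negative. */
	return managed > high ? managed - high : 0;
}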
        pg_data_t *pgdat = NODE_DATA(nid);
 
        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
-               managed_pages += pgdat->node_zones[zone_type].managed_pages;
+               managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
        val->totalram = managed_pages;
        val->sharedram = node_page_state(pgdat, NR_SHMEM);
        val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
                struct zone *zone = &pgdat->node_zones[zone_type];
 
                if (is_highmem(zone)) {
-                       managed_highpages += zone->managed_pages;
+                       managed_highpages += zone_managed_pages(zone);
                        free_highpages += zone_page_state(zone, NR_FREE_PAGES);
                }
        }
                        K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
                        K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
                        K(zone->present_pages),
-                       K(zone->managed_pages),
+                       K(zone_managed_pages(zone)),
                        K(zone_page_state(zone, NR_MLOCK)),
                        zone_page_state(zone, NR_KERNEL_STACK_KB),
                        K(zone_page_state(zone, NR_PAGETABLE)),
         * The per-cpu-pages pools are set to around 1000th of the
         * size of the zone.
         */
-       batch = zone->managed_pages / 1024;
+       batch = zone_managed_pages(zone) / 1024;
        /* But no more than a meg. */
        if (batch * PAGE_SIZE > 1024 * 1024)
                batch = (1024 * 1024) / PAGE_SIZE;
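
To put numbers on the sizing shown in this hunk, assume a made-up 2 GiB zone with 4 KiB pages:

static unsigned long example_zone_batchsize(void)
{
	unsigned long managed = 524288;		/* 2 GiB / 4 KiB		*/
	unsigned long batch   = managed / 1024;	/* ~1/1000th: 512 pages		*/

	/* "But no more than a meg": cap at 1 MiB worth of pages. */
	if (batch * 4096 > 1024 * 1024)
		batch = (1024 * 1024) / 4096;	/* 256 pages			*/
	return batch;
}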
 {
        if (percpu_pagelist_fraction)
                pageset_set_high(pcp,
-                       (zone->managed_pages /
+                       (zone_managed_pages(zone) /
                                percpu_pagelist_fraction));
        else
                pageset_set_batch(pcp, zone_batchsize(zone));
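
With the same made-up 524288-page zone and a hypothetical vm.percpu_pagelist_fraction of 8, the per-cpu high mark would come out as:

static unsigned long example_pcp_high(void)
{
	unsigned long managed  = 524288;
	unsigned long fraction = 8;		/* vm.percpu_pagelist_fraction	*/

	return managed / fraction;		/* 65536 pages per pcp list	*/
}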
 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
                                                        unsigned long remaining_pages)
 {
-       zone->managed_pages = remaining_pages;
+       atomic_long_set(&zone->managed_pages, remaining_pages);
        zone_set_nid(zone, nid);
        zone->name = zone_names[idx];
        zone->zone_pgdat = NODE_DATA(nid);
 void adjust_managed_page_count(struct page *page, long count)
 {
        spin_lock(&managed_page_count_lock);
-       page_zone(page)->managed_pages += count;
+       atomic_long_add(count, &page_zone(page)->managed_pages);
        totalram_pages += count;
 #ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
 {
        __free_reserved_page(page);
        totalram_pages++;
-       page_zone(page)->managed_pages++;
+       atomic_long_inc(&page_zone(page)->managed_pages);
        totalhigh_pages++;
 }
 #endif
                for (i = 0; i < MAX_NR_ZONES; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        long max = 0;
-                       unsigned long managed_pages = zone->managed_pages;
+                       unsigned long managed_pages = zone_managed_pages(zone);
 
                        /* Find valid and maximum lowmem_reserve in the zone */
                        for (j = i; j < MAX_NR_ZONES; j++) {
        for_each_online_pgdat(pgdat) {
                for (j = 0; j < MAX_NR_ZONES; j++) {
                        struct zone *zone = pgdat->node_zones + j;
-                       unsigned long managed_pages = zone->managed_pages;
+                       unsigned long managed_pages = zone_managed_pages(zone);
 
                        zone->lowmem_reserve[j] = 0;
 
                                        lower_zone->lowmem_reserve[j] =
                                                managed_pages / sysctl_lowmem_reserve_ratio[idx];
                                }
-                               managed_pages += lower_zone->managed_pages;
+                               managed_pages += zone_managed_pages(lower_zone);
                        }
                }
        }
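
To illustrate the reserve arithmetic, assume a single higher zone managing 1000000 pages and a ratio of 256 for the lower zone (figures invented):

static unsigned long example_lowmem_reserve(void)
{
	unsigned long higher_zone_pages = 1000000;	/* accumulated managed_pages   */
	unsigned long ratio             = 256;		/* sysctl_lowmem_reserve_ratio */

	/* Pages the lower zone holds back from requests that could have been
	 * satisfied from the higher zone. */
	return higher_zone_pages / ratio;		/* 3906 pages */
}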
        /* Calculate total number of !ZONE_HIGHMEM pages */
        for_each_zone(zone) {
                if (!is_highmem(zone))
-                       lowmem_pages += zone->managed_pages;
+                       lowmem_pages += zone_managed_pages(zone);
        }
 
        for_each_zone(zone) {
                u64 tmp;
 
                spin_lock_irqsave(&zone->lock, flags);
-               tmp = (u64)pages_min * zone->managed_pages;
+               tmp = (u64)pages_min * zone_managed_pages(zone);
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
                        /*
                         */
                        unsigned long min_pages;
 
-                       min_pages = zone->managed_pages / 1024;
+                       min_pages = zone_managed_pages(zone) / 1024;
                        min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
                        zone->watermark[WMARK_MIN] = min_pages;
                } else {
                 * ensure a minimum size on small systems.
                 */
                tmp = max_t(u64, tmp >> 2,
-                           mult_frac(zone->managed_pages,
+                           mult_frac(zone_managed_pages(zone),
                                      watermark_scale_factor, 10000));
 
                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
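
A worked example of the non-highmem path, assuming this zone's share of pages_min comes to 2048 pages, the zone manages 1048576 pages, and watermark_scale_factor sits at its default of 10 (all figures hypothetical):

static unsigned long example_wmark_low(void)
{
	unsigned long tmp     = 2048;			/* share of pages_min (WMARK_MIN) */
	unsigned long managed = 1048576;
	unsigned long scaled  = managed * 10 / 10000;	/* 0.1% of the zone: 1048 */
	unsigned long gap     = (tmp >> 2) > scaled ? (tmp >> 2) : scaled;

	return tmp + gap;				/* WMARK_LOW = 2048 + 1048 = 3096 */
}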
                pgdat->min_unmapped_pages = 0;
 
        for_each_zone(zone)
-               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
-                               sysctl_min_unmapped_ratio) / 100;
+               zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
+                                                        sysctl_min_unmapped_ratio) / 100;
 }
 
 
                pgdat->min_slab_pages = 0;
 
        for_each_zone(zone)
-               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
-                               sysctl_min_slab_ratio) / 100;
+               zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
+                                                    sysctl_min_slab_ratio) / 100;
 }
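
Both handlers above are plain percentages of managed pages; with an arbitrary 1000000-page zone and the usual defaults of 1 for vm.min_unmapped_ratio and 5 for vm.min_slab_ratio:

static void example_reclaim_thresholds(unsigned long *unmapped, unsigned long *slab)
{
	unsigned long managed = 1000000;

	*unmapped = managed * 1 / 100;		/* 10000 pages	*/
	*slab     = managed * 5 / 100;		/* 50000 pages	*/
}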
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,