return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+/* Return the first PFN past the end of @zone (exclusive end of the spanned range). */
+static inline unsigned long zone_end_pfn(const struct zone *zone)
+{
+       return zone->zone_start_pfn + zone->spanned_pages;
+}
+
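+/* Check whether @pfn falls within the PFN range spanned by @zone. */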
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+       return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 
 static void __reset_isolation_suitable(struct zone *zone)
 {
        unsigned long start_pfn = zone->zone_start_pfn;
-       unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;
 
        zone->compact_cached_migrate_pfn = start_pfn;
                                struct compact_control *cc)
 {
        struct page *page;
-       unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+       unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
         */
        high_pfn = min(low_pfn, pfn);
 
-       zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       z_end_pfn = zone_end_pfn(zone);
 
        /*
         * Isolate free pages until enough are available to migrate the
                 * only scans within a pageblock
                 */
                end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               end_pfn = min(end_pfn, zone_end_pfn);
+               end_pfn = min(end_pfn, z_end_pfn);
                isolated = isolate_freepages_block(cc, pfn, end_pfn,
                                                   freelist, false);
                nr_freepages += isolated;
 {
        int ret;
        unsigned long start_pfn = zone->zone_start_pfn;
-       unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       unsigned long end_pfn = zone_end_pfn(zone);
 
        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
 
         */
        lock_memory_hotplug();
        for_each_online_node(i) {
-               pg_data_t *pgdat = NODE_DATA(i);
-               unsigned long start_pfn = pgdat->node_start_pfn;
-               unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+               unsigned long start_pfn = node_start_pfn(i);
+               unsigned long end_pfn = node_end_pfn(i);
                unsigned long pfn;
 
                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 
        pgdat_resize_lock(z1->zone_pgdat, &flags);
 
        /* can't move pfns which are higher than @z2 */
-       if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
+       if (end_pfn > zone_end_pfn(z2))
                goto out_fail;
        /* the move out part mast at the left most of @z2 */
        if (start_pfn > z2->zone_start_pfn)
                z1_start_pfn = start_pfn;
 
        resize_zone(z1, z1_start_pfn, end_pfn);
-       resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);
+       resize_zone(z2, end_pfn, zone_end_pfn(z2));
 
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
 
        if (z1->zone_start_pfn > start_pfn)
                goto out_fail;
        /* the move out part mast at the right most of @z1 */
-       if (z1->zone_start_pfn + z1->spanned_pages >  end_pfn)
+       if (zone_end_pfn(z1) >  end_pfn)
                goto out_fail;
        /* must included/overlap */
-       if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
+       if (start_pfn >= zone_end_pfn(z1))
                goto out_fail;
 
        /* use end_pfn for z2's end_pfn if z2 is empty */
        if (z2->spanned_pages)
-               z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
+               z2_end_pfn = zone_end_pfn(z2);
        else
                z2_end_pfn = end_pfn;
 
 
 
        do {
                seq = zone_span_seqbegin(zone);
-               if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
-                       ret = 1;
-               else if (pfn < zone->zone_start_pfn)
+               if (!zone_spans_pfn(zone, pfn))
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));
 
        end_pfn = start_pfn + pageblock_nr_pages - 1;
 
        /* Do not cross zone boundaries */
-       if (start_pfn < zone->zone_start_pfn)
+       if (!zone_spans_pfn(zone, start_pfn))
                start_page = page;
-       if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
+       if (!zone_spans_pfn(zone, end_pfn))
                return 0;
 
        return move_freepages(zone, start_page, end_page, migratetype);
 
        spin_lock_irqsave(&zone->lock, flags);
 
-       max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       max_zone_pfn = zone_end_pfn(zone);
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
         * the block.
         */
        start_pfn = zone->zone_start_pfn;
-       end_pfn = start_pfn + zone->spanned_pages;
+       end_pfn = zone_end_pfn(zone);
        start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
                 * pfn out of zone.
                 */
                if ((z->zone_start_pfn <= pfn)
-                   && (pfn < z->zone_start_pfn + z->spanned_pages)
+                   && (pfn < zone_end_pfn(z))
                    && !(pfn & (pageblock_nr_pages - 1)))
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
                 * for the buddy allocator to function correctly.
                 */
                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
-               end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+               end = pgdat_end_pfn(pgdat);
                end = ALIGN(end, MAX_ORDER_NR_PAGES);
                size =  (end - start) * sizeof(struct page);
                map = alloc_remap(pgdat->node_id, size);
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
-       VM_BUG_ON(pfn < zone->zone_start_pfn);
-       VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
+       VM_BUG_ON(!zone_spans_pfn(zone, pfn));
 
        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (flags & value)
 
        zone = page_zone(page);
        pfn = page_to_pfn(page);
-       if (zone->zone_start_pfn > pfn ||
-                       zone->zone_start_pfn + zone->spanned_pages <= pfn)
+       if (!zone_spans_pfn(zone, pfn))
                return false;
 
        return !has_unmovable_pages(zone, page, 0, true);
 
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
-       unsigned long end_pfn = start_pfn + zone->spanned_pages;
+       unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count[MIGRATE_TYPES] = { 0, };
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {