#endif
 };
 
+unsigned long page_ext_size = sizeof(struct page_ext);
+
 static unsigned long total_usage;
-static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
 
        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
-                       page_ext_ops[i]->offset = sizeof(struct page_ext) +
-                                               extra_mem;
-                       extra_mem += page_ext_ops[i]->size;
+                       page_ext_ops[i]->offset = page_ext_size;
+                       page_ext_size += page_ext_ops[i]->size;
                        need = true;
                }
        }
 
         return need;
 }
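
With extra_mem gone, page_ext_size becomes the single source of truth for the per-page footprint: each client whose ->need() returns true claims its slice at the current size, and the size then grows by that client's ->size. A minimal sketch of how a client would reach its slice afterwards, using the real ->offset field but a hypothetical helper name:

static void *get_client_data(struct page_ext *page_ext,
                             struct page_ext_operations *ops)
{
        /* ops->offset was set to the running page_ext_size above */
        return (void *)page_ext + ops->offset;
}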
 
-static unsigned long get_entry_size(void)
-{
-       return sizeof(struct page_ext) + extra_mem;
-}
-
 static inline struct page_ext *get_entry(void *base, unsigned long index)
 {
-       return base + get_entry_size() * index;
+       return base + page_ext_size * index;
 }
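
The page_ext_next() helper used in the page_owner hunks below is not defined in this file; a minimal implementation consistent with get_entry()'s stride, assuming it lives next to the page_ext_size declaration (e.g. in include/linux/page_ext.h), would be:

static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
        void *next = curr;

        next += page_ext_size;
        return next;
}

The detour through void * avoids pointer arithmetic in units of sizeof(struct page_ext); consecutive entries are page_ext_size bytes apart, not sizeof(struct page_ext).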
 
 #if !defined(CONFIG_SPARSEMEM)
                !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
                nr_pages += MAX_ORDER_NR_PAGES;
 
-       table_size = get_entry_size() * nr_pages;
+       table_size = page_ext_size * nr_pages;
 
        base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
        if (section->page_ext)
                return 0;
 
-       table_size = get_entry_size() * PAGES_PER_SECTION;
+       table_size = page_ext_size * PAGES_PER_SECTION;
        base = alloc_page_ext(table_size, nid);
 
        /*
          * The passed "pfn" may not be aligned to SECTION.  For the
          * calculation we need to apply a mask.
         */
        pfn &= PAGE_SECTION_MASK;
-       section->page_ext = (void *)base - get_entry_size() * pfn;
+       section->page_ext = (void *)base - page_ext_size * pfn;
        total_usage += table_size;
        return 0;
 }
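
The stored base is deliberately biased downward by the section-aligned pfn, so a later lookup can index by the raw pfn without first subtracting the section start. A worked sketch of the arithmetic (cf. get_entry() above):

/*
 * entry = (void *)section->page_ext + page_ext_size * pfn
 *       == base + page_ext_size * (pfn - section_aligned_pfn)
 */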
                struct page *page = virt_to_page(addr);
                size_t table_size;
 
-               table_size = get_entry_size() * PAGES_PER_SECTION;
+               table_size = page_ext_size * PAGES_PER_SECTION;
 
                BUG_ON(PageReserved(page));
                kmemleak_free(addr);
 
                handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 #endif
 
+       page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
        for (i = 0; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
-               if (unlikely(!page_ext))
-                       continue;
                __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if (debug_pagealloc_enabled()) {
                        page_owner->free_handle = handle;
                }
 #endif
+               page_ext = page_ext_next(page_ext);
        }
 }
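
Hoisting lookup_page_ext() out of the loop is safe because all (1 << order) pages of the block sit in one section, so their entries are contiguous in a single table: one NULL check for the head page covers the whole range, and the per-iteration continue is replaced by stepping with page_ext_next(). get_page_owner(), defined elsewhere in this file, resolves the page_owner slice via the offset recorded at boot, roughly:

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}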
 
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 
-               page_ext = lookup_page_ext(page + i);
+               page_ext = page_ext_next(page_ext);
        }
 }
 
        if (unlikely(!page_ext))
                return;
 
-       page_owner = get_page_owner(page_ext);
-       page_owner->order = 0;
-       for (i = 1; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
+       for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
+               page_ext = page_ext_next(page_ext);
        }
 }
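
The head/tail split is gone: instead of setting order = 0 on the head entry and then re-looking up each tail page from i = 1, the loop walks all (1 << order) entries uniformly. A worked example, assuming order = 2:

/*
 * The loop visits 4 consecutive entries, at byte offsets 0,
 * page_ext_size, 2 * page_ext_size and 3 * page_ext_size from the
 * head page's entry, setting page_owner->order = 0 on each.
 */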