www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Revert "mm: meminit: move page initialization into a separate function"
author: Dhaval Giani <dhaval.giani@oracle.com>
Thu, 13 Jul 2017 21:48:08 +0000 (17:48 -0400)
committer: Dhaval Giani <dhaval.giani@oracle.com>
Sat, 15 Jul 2017 03:45:04 +0000 (23:45 -0400)
This reverts commit aeb76e45d8809efc0c273a5aa6edf481256893c6.

Orabug: 26446232
Signed-off-by: Dhaval Giani <dhaval.giani@oracle.com>
Reviewed-by: Jack Vogel <jack.vogel@oracle.com>
mm/page_alloc.c

index cbace11699d606cffebe316eb7db5513e0a88d8d..cf31aac46e6a7bf18632b5c80d2964157cadc177 100644 (file)
@@ -817,51 +817,6 @@ void reserve_bootmem_region(unsigned long start, unsigned long end)
                        SetPageReserved(pfn_to_page(start_pfn));
 }
 
-static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-                               unsigned long zone, int nid)
-{
-       struct zone *z = &NODE_DATA(nid)->node_zones[zone];
-
-       set_page_links(page, zone, nid, pfn);
-       mminit_verify_page_links(page, zone, nid, pfn);
-       init_page_count(page);
-       page_mapcount_reset(page);
-       page_cpupid_reset_last(page);
-       SetPageReserved(page);
-
-       /*
-        * Mark the block movable so that blocks are reserved for
-        * movable at startup. This will force kernel allocations
-        * to reserve their blocks rather than leaking throughout
-        * the address space during boot when many long-lived
-        * kernel allocations are made. Later some blocks near
-        * the start are marked MIGRATE_RESERVE by
-        * setup_zone_migrate_reserve()
-        *
-        * bitmap is created for zone's valid pfn range. but memmap
-        * can be created for invalid pages (for alignment)
-        * check here not to call set_pageblock_migratetype() against
-        * pfn out of zone.
-        */
-       if ((z->zone_start_pfn <= pfn)
-           && (pfn < zone_end_pfn(z))
-           && !(pfn & (pageblock_nr_pages - 1)))
-               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
-       INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-       /* The shift won't overflow because ZONE_NORMAL is below 4G. */
-       if (!is_highmem_idx(zone))
-               set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
-}
-
-static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-                                       int nid)
-{
-       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
-}
-
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
        bool compound = PageCompound(page);
@@ -4259,6 +4214,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                unsigned long start_pfn, enum memmap_context context)
 {
+       struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
        struct zone *z;
@@ -4279,7 +4235,38 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                        if (!early_pfn_in_nid(pfn, nid))
                                continue;
                }
-               __init_single_pfn(pfn, zone, nid);
+               page = pfn_to_page(pfn);
+               set_page_links(page, zone, nid, pfn);
+               mminit_verify_page_links(page, zone, nid, pfn);
+               init_page_count(page);
+               page_mapcount_reset(page);
+               page_cpupid_reset_last(page);
+               SetPageReserved(page);
+               /*
+                * Mark the block movable so that blocks are reserved for
+                * movable at startup. This will force kernel allocations
+                * to reserve their blocks rather than leaking throughout
+                * the address space during boot when many long-lived
+                * kernel allocations are made. Later some blocks near
+                * the start are marked MIGRATE_RESERVE by
+                * setup_zone_migrate_reserve()
+                *
+                * bitmap is created for zone's valid pfn range. but memmap
+                * can be created for invalid pages (for alignment)
+                * check here not to call set_pageblock_migratetype() against
+                * pfn out of zone.
+                */
+               if ((z->zone_start_pfn <= pfn)
+                   && (pfn < zone_end_pfn(z))
+                   && !(pfn & (pageblock_nr_pages - 1)))
+                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+
+               INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+               /* The shift won't overflow because ZONE_NORMAL is below 4G. */
+               if (!is_highmem_idx(zone))
+                       set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
        }
 }