page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}
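+/*
+ * Non-atomic helper for setting or clearing PG_reserved. Only safe
+ * while the page is still being initialized and page->flags cannot
+ * be observed by other CPUs.
+ */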
+static inline void set_page_reserved(struct page *page, bool reserved)
+{
+ page->flags &= ~(1ul << PG_reserved);
+ page->flags |= (unsigned long)(!!reserved) << PG_reserved;
+}
+
static inline void set_page_links(struct page *page, enum zone_type zone,
- unsigned long node, unsigned long pfn)
+ unsigned long node, unsigned long pfn, bool reserved)
{
set_page_zone(page, zone);
set_page_node(page, node);
+ set_page_reserved(page, reserved);
#ifdef SECTION_IN_PAGE_FLAGS
set_page_section(page, pfn_to_section_nr(pfn));
#endif
static void __meminit __init_struct_page_nolru(struct page *page,
unsigned long pfn,
- unsigned long zone, int nid)
+ unsigned long zone, int nid,
+ bool is_reserved)
{
mm_zero_struct_page(page);
- set_page_links(page, zone, nid, pfn);
+
+ /*
+ * We can use a non-atomic operation for setting the
+ * PG_reserved flag as we are still initializing the pages.
+ */
+ set_page_links(page, zone, nid, pfn, is_reserved);
init_page_count(page);
page_mapcount_reset(page);
page_cpupid_reset_last(page);
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid)
{
- __init_struct_page_nolru(page, pfn, zone, nid);
+ __init_struct_page_nolru(page, pfn, zone, nid, false);
INIT_LIST_HEAD(&page->lru);
}
static void __meminit __init_pageblock(unsigned long start_pfn,
unsigned long nr_pages,
unsigned long zone, int nid,
- struct dev_pagemap *pgmap)
+ struct dev_pagemap *pgmap,
+ bool is_reserved)
{
unsigned long nr_pgmask = pageblock_nr_pages - 1;
struct page *start_page = pfn_to_page(start_pfn);
* is not defined.
*/
for (page = start_page + nr_pages; page-- != start_page; pfn--) {
- __init_struct_page_nolru(page, pfn, zone, nid);
- /*
- * Mark page reserved as it will need to wait for onlining
- * phase for it to be fully associated with a zone.
- *
- * We can use the non-atomic __set_bit operation for setting
- * the flag as we are still initializing the pages.
- */
- __SetPageReserved(page);
+ __init_struct_page_nolru(page, pfn, zone, nid, is_reserved);
+
/*
* ZONE_DEVICE pages union ->lru with a ->pgmap back
* pointer and hmm_data. It is a bug if a ZONE_DEVICE
pfn = max(ALIGN_DOWN(pfn - 1, pageblock_nr_pages), start_pfn);
stride -= pfn;
- __init_pageblock(pfn, stride, zone, nid, pgmap);
+ /*
+ * The last argument of __init_pageblock indicates whether the
+ * pages will be marked as reserved.
+ *
+ * Mark the pages reserved as they will need to wait for the
+ * onlining phase before being fully associated with a zone.
+ *
+ * Under certain circumstances ZONE_DEVICE pages may not need
+ * to be marked as reserved, but there is still code that
+ * depends on this flag being set, so keep it for now.
+ */
+ __init_pageblock(pfn, stride, zone, nid, pgmap, true);
cond_resched();
}