static void __dump_page(struct page *page)
{
struct folio *folio = page_folio(page);
- struct page *head = &folio->page;
struct address_space *mapping;
- bool compound = PageCompound(page);
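+ /*
+ * Don't trust the folio pointer until the bounds check below;
+ * PageTail() only reads the page we were passed, and 'compound'
+ * is reassigned in both branches of that check.
+ */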
+ bool compound = PageTail(page);
/*
* Accessing the pageblock without the zone lock. It could change to
* "isolate" again in the meantime, but since we are just dumping the
int mapcount;
char *type = "";
- if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
+ if (page < folio_page(folio, 0) ||
+ page >= folio_page(folio, MAX_ORDER_NR_PAGES)) {
/*
- * Corrupt page, so we cannot call page_mapping. Instead, do a
- * safe subset of the steps that page_mapping() does. Caution:
+ * Corrupt page, so we cannot call folio_mapping. Instead, do a
+ * safe subset of the steps that folio_mapping() does. Caution:
* this will be misleading for tail pages, PageSwapCache pages,
- * and potentially other situations. (See the page_mapping()
+ * and potentially other situations. (See the folio_mapping()
* implementation for what's missing here.)
mapping = NULL;
else
mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
- head = page;
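+ /* Treat the corrupt page as a standalone, non-compound page. */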
folio = (struct folio *)page;
compound = false;
} else {
- mapping = page_mapping(page);
+ mapping = folio_mapping(folio);
+ compound = folio_test_large(folio);
}
/*
* page->_mapcount space in struct page is used by sl[aou]b pages to
* encode own info.
*/
- mapcount = PageSlab(head) ? 0 : page_mapcount(page);
+ mapcount = folio_test_slab(folio) ? 0 : page_mapcount(page);
pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
- page, page_ref_count(head), mapcount, mapping,
+ page, folio_ref_count(folio), mapcount, mapping,
page_to_pgoff(page), page_to_pfn(page));
if (compound) {
- pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
- head, compound_order(head),
+ pr_warn("folio:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+ folio, folio_order(folio),
folio_entire_mapcount(folio),
folio_nr_pages_mapped(folio),
atomic_read(&folio->_pincount));
}
#ifdef CONFIG_MEMCG
- if (head->memcg_data)
- pr_warn("memcg:%lx\n", head->memcg_data);
+ if (folio->memcg_data)
+ pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
- if (PageKsm(page))
+ if (folio_test_ksm(folio))
type = "ksm ";
- else if (PageAnon(page))
+ else if (folio_test_anon(folio))
type = "anon ";
else if (mapping)
dump_mapping(mapping);
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
- pr_warn("%sflags: %pGp%s\n", type, &head->flags,
+ pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
page_cma ? " CMA" : "");
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
sizeof(struct page), false);
- if (head != page)
- print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
- sizeof(unsigned long), head,
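+ /* We were passed a tail page; dump the raw head page of the folio too. */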
+ if (&folio->page != page)
+ print_hex_dump(KERN_WARNING, "folio: ", DUMP_PREFIX_NONE, 32,
+ sizeof(unsigned long), folio,
sizeof(struct page), false);
}