www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm/debug: Convert __dump_page() to use a folio throughout
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 2 Jul 2022 18:26:34 +0000 (14:26 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 14:00:32 +0000 (09:00 -0500)
We have to be a little careful here because a corrupt page may appear
to be a tail page, and that can trip some debugging assertions leading
to an infinite loop of dumping pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/debug.c

index 9d3d893dc7f4eafe2bd93bcf9f6a430fc45d113f..9e9e1603e79bc83c389fd3b1c68762184ea95c96 100644 (file)
@@ -49,9 +49,8 @@ const struct trace_print_flags vmaflag_names[] = {
 static void __dump_page(struct page *page)
 {
        struct folio *folio = page_folio(page);
-       struct page *head = &folio->page;
        struct address_space *mapping;
-       bool compound = PageCompound(page);
+       bool compound = PageTail(page);
        /*
         * Accessing the pageblock without the zone lock. It could change to
         * "isolate" again in the meantime, but since we are just dumping the
@@ -62,10 +61,11 @@ static void __dump_page(struct page *page)
        int mapcount;
        char *type = "";
 
-       if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
+       if (page < folio_page(folio, 0) ||
+           page >= folio_page(folio, MAX_ORDER_NR_PAGES)) {
                /*
-                * Corrupt page, so we cannot call page_mapping. Instead, do a
-                * safe subset of the steps that page_mapping() does. Caution:
+                * Corrupt page, so we cannot call folio_mapping. Instead, do a
+                * safe subset of the steps that folio_mapping() does. Caution:
                 * this will be misleading for tail pages, PageSwapCache pages,
                 * and potentially other situations. (See the page_mapping()
                 * implementation for what's missing here.)
@@ -76,11 +76,11 @@ static void __dump_page(struct page *page)
                        mapping = NULL;
                else
                        mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
-               head = page;
                folio = (struct folio *)page;
                compound = false;
        } else {
-               mapping = page_mapping(page);
+               mapping = folio_mapping(folio);
+               compound = folio_test_large(folio);
        }
 
        /*
@@ -88,39 +88,39 @@ static void __dump_page(struct page *page)
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
-       mapcount = PageSlab(head) ? 0 : page_mapcount(page);
+       mapcount = folio_test_slab(folio) ? 0 : page_mapcount(page);
 
        pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
-                       page, page_ref_count(head), mapcount, mapping,
+                       page, folio_ref_count(folio), mapcount, mapping,
                        page_to_pgoff(page), page_to_pfn(page));
        if (compound) {
-               pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
-                               head, compound_order(head),
+               pr_warn("folio:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+                               folio, folio_order(folio),
                                folio_entire_mapcount(folio),
                                folio_nr_pages_mapped(folio),
                                atomic_read(&folio->_pincount));
        }
 
 #ifdef CONFIG_MEMCG
-       if (head->memcg_data)
-               pr_warn("memcg:%lx\n", head->memcg_data);
+       if (folio->memcg_data)
+               pr_warn("memcg:%lx\n", folio->memcg_data);
 #endif
-       if (PageKsm(page))
+       if (folio_test_ksm(folio))
                type = "ksm ";
-       else if (PageAnon(page))
+       else if (folio_test_anon(folio))
                type = "anon ";
        else if (mapping)
                dump_mapping(mapping);
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-       pr_warn("%sflags: %pGp%s\n", type, &head->flags,
+       pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
                page_cma ? " CMA" : "");
        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
-       if (head != page)
-               print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
-                       sizeof(unsigned long), head,
+       if (&folio->page != page)
+               print_hex_dump(KERN_WARNING, "folio: ", DUMP_PREFIX_NONE, 32,
+                       sizeof(unsigned long), folio,
                        sizeof(struct page), false);
 }