mm: Rename page->mapping to page->__folio_mapping
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 17 Mar 2025 17:38:19 +0000 (13:38 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 17 Mar 2025 19:55:41 +0000 (15:55 -0400)
All users of page->mapping except page migration have been converted
and no longer refer to it.  Renaming the field prevents new users from
appearing (or at least makes them easy to grep for), and lets us delete
the TAIL_MAPPING poison since there is no longer any way to see it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
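
For context, a minimal sketch (not part of the patch) of how callers reach the
mapping once page->mapping is gone: page_folio(), folio_mapping() and
__PageMovable() are existing helpers, while example_mapping_of() is a
hypothetical name used purely for illustration.

#include <linux/page-flags.h>
#include <linux/pagemap.h>

/* Hypothetical helper, for illustration only. */
static struct address_space *example_mapping_of(struct page *page)
{
	/*
	 * Movable (non-LRU) pages keep their movable_operations in this
	 * word, tagged with PAGE_MAPPING_MOVABLE; only the migration code
	 * reads page->__folio_mapping directly to recover those ops.
	 */
	if (__PageMovable(page))
		return NULL;

	/*
	 * Everything else goes through the folio; folio_mapping() also
	 * handles the anon and swap-cache cases.
	 */
	return folio_mapping(page_folio(page));
}

The deliberately ugly __folio_mapping name keeps the field in place for layout
and aliasing purposes while making any new direct user stand out in review and
in grep.
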
12 files changed:
include/linux/migrate.h
include/linux/mm_types.h
include/linux/page-flags.h
include/linux/poison.h
include/trace/events/page_ref.h
kernel/vmcore_info.c
mm/compaction.c
mm/internal.h
mm/migrate_device.c
mm/page_alloc.c
mm/zpdesc.h
tools/include/linux/poison.h

include/linux/migrate.h
index aaa2114498d6de12db84154ac6a8f22343404729..8f1261d0e75a8585980422a5010c5e3104233c97 100644
@@ -138,7 +138,7 @@ const struct movable_operations *page_movable_ops(struct page *page)
        VM_BUG_ON(!__PageMovable(page));
 
        return (const struct movable_operations *)
-               ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
+               ((unsigned long)page->__folio_mapping - PAGE_MAPPING_MOVABLE);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
include/linux/mm_types.h
index 86db7fc40021e5e84635ebf40cec91f84db59084..3ed24519a16b5db929e992fec7f17d7f090861bd 100644
@@ -106,7 +106,7 @@ struct page {
                                };
                        };
                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
-                       struct address_space *mapping;
+                       struct address_space *__folio_mapping;
                        union {
                                pgoff_t __folio_index;          /* Our offset within mapping. */
                                unsigned long share;    /* share count for fsdax */
@@ -487,7 +487,7 @@ struct folio {
        static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
 FOLIO_MATCH(flags, flags);
 FOLIO_MATCH(lru, lru);
-FOLIO_MATCH(mapping, mapping);
+FOLIO_MATCH(__folio_mapping, mapping);
 FOLIO_MATCH(compound_head, lru);
 FOLIO_MATCH(__folio_index, index);
 FOLIO_MATCH(private, private);
@@ -533,14 +533,14 @@ FOLIO_MATCH(compound_head, _head_3);
  *                    pgds.
  * @_pt_pad_1:        Padding that aliases with page's compound head.
  * @pmd_huge_pte:     Protected by ptdesc->ptl, used for THPs.
- * @__page_mapping:   Aliases with page->mapping. Unused for page tables.
+ * @__folio_mapping:  Aliases with folio->mapping. Unused for page tables.
  * @pt_index:         Used for s390 gmap.
  * @pt_mm:            Used for x86 pgds.
  * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
  * @pt_share_count:   Used for HugeTLB PMD page table share count.
  * @_pt_pad_2:        Padding to ensure proper alignment.
  * @ptl:              Lock for the page table.
- * @__page_type:      Same as page->page_type. Unused for page tables.
+ * @__page_type:      Set to PGTY_table.  Bottom 24 bits unused.
  * @__page_refcount:  Same as page refcount.
  * @pt_memcg_data:    Memcg data. Tracked for page tables here.
  *
@@ -558,7 +558,7 @@ struct ptdesc {
                        pgtable_t pmd_huge_pte;
                };
        };
-       unsigned long __page_mapping;
+       unsigned long __folio_mapping;
 
        union {
                pgoff_t pt_index;
@@ -589,7 +589,7 @@ struct ptdesc {
 TABLE_MATCH(flags, __page_flags);
 TABLE_MATCH(compound_head, pt_list);
 TABLE_MATCH(compound_head, _pt_pad_1);
-TABLE_MATCH(mapping, __page_mapping);
+TABLE_MATCH(__folio_mapping, __folio_mapping);
 TABLE_MATCH(__folio_index, pt_index);
 TABLE_MATCH(rcu_head, pt_rcu_head);
 TABLE_MATCH(page_type, __page_type);
include/linux/page-flags.h
index 31d2575d178b55e922dd351f1c17476eb93ef338..2091f44bc5afe62b3de422512b560c41b1b045b1 100644
@@ -740,7 +740,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
 
 static __always_inline bool __PageMovable(const struct page *page)
 {
-       return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+       return ((unsigned long)page->__folio_mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
 }
 
include/linux/poison.h
index 331a9a996fa8746626afa63ea462b85ca3e5938b..c6682e38d1fc5cc94990b7b49c2f32064b8315c5 100644
 /********** mm/page_poison.c **********/
 #define PAGE_POISON 0xaa
 
-/********** mm/page_alloc.c ************/
-
-#define TAIL_MAPPING   ((void *) 0x400 + POISON_POINTER_DELTA)
-
 /********** mm/slab.c **********/
 /*
  * Magic nums for obj red zoning.
include/trace/events/page_ref.h
index fe33a255b7d093aa46b7b0e3cd301475d298275e..c14494d5cd3b5d39788142808c377c76383a555b 100644
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
                __entry->flags = page->flags;
                __entry->count = page_ref_count(page);
                __entry->mapcount = atomic_read(&page->_mapcount);
-               __entry->mapping = page->mapping;
+               __entry->mapping = page->__folio_mapping;
                __entry->mt = get_pageblock_migratetype(page);
                __entry->val = v;
        ),
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
                __entry->flags = page->flags;
                __entry->count = page_ref_count(page);
                __entry->mapcount = atomic_read(&page->_mapcount);
-               __entry->mapping = page->mapping;
+               __entry->mapping = page->__folio_mapping;
                __entry->mt = get_pageblock_migratetype(page);
                __entry->val = v;
                __entry->ret = ret;
kernel/vmcore_info.c
index 1fec61603ef3260a555180921184c9974ddcfb9c..30e5d9e3769a74b8979c1f3713e9831d85025832 100644
@@ -171,7 +171,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _refcount);
-       VMCOREINFO_OFFSET(page, mapping);
+       VMCOREINFO_OFFSET(folio, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(page, _mapcount);
        VMCOREINFO_OFFSET(page, private);
mm/compaction.c
index 4a2ccb82d0b27ae778d6381a9cedc2a9168a7b48..b472aa9f8e541a34ba112fdc6e985a0641490536 100644
@@ -133,7 +133,7 @@ void __SetPageMovable(struct page *page, const struct movable_operations *mops)
 {
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
-       page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
+       page->__folio_mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
 }
 EXPORT_SYMBOL(__SetPageMovable);
 
@@ -144,7 +144,7 @@ void __ClearPageMovable(struct page *page)
         * This page still has the type of a movable page, but it's
         * actually not movable any more.
         */
-       page->mapping = (void *)PAGE_MAPPING_MOVABLE;
+       page->__folio_mapping = (void *)PAGE_MAPPING_MOVABLE;
 }
 EXPORT_SYMBOL(__ClearPageMovable);
 
mm/internal.h
index 6869c8ceea3481679edd2f4705ed6a1c1db07e97..52c64b19a501a936b7d4aada865c7daa3f18901f 100644
@@ -784,7 +784,6 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
 {
        struct page *p = head + tail_idx;
 
-       p->mapping = TAIL_MAPPING;
        set_compound_head(p, head);
        set_page_private(p, 0);
 }
mm/migrate_device.c
index 3158afe7eb2308f5b259c618a9452785e524574b..f3f900663e9a92eb55f956d1d951ea8b3019e013 100644
@@ -176,7 +176,7 @@ again:
                }
 
                /* FIXME support THP */
-               if (!page || !page->mapping || PageTransCompound(page)) {
+               if (!page || !page->__folio_mapping || PageTransCompound(page)) {
                        mpfn = 0;
                        goto next;
                }
mm/page_alloc.c
index 5c1a0f0cf26f777fbec500c7cd36f6bba5b72c74..37a6c0a241a48fcd3d1051ffccbed164de0e80a7 100644
@@ -892,7 +892,7 @@ static inline bool page_expected_state(struct page *page,
        if (unlikely(atomic_read(&page->_mapcount) != -1))
                return false;
 
-       if (unlikely((unsigned long)page->mapping |
+       if (unlikely((unsigned long)page->__folio_mapping |
                        page_ref_count(page) |
 #ifdef CONFIG_MEMCG
                        page->memcg_data |
@@ -912,7 +912,7 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
 
        if (unlikely(atomic_read(&page->_mapcount) != -1))
                bad_reason = "nonzero mapcount";
-       if (unlikely(page->mapping != NULL))
+       if (unlikely(page->__folio_mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(page_ref_count(page) != 0))
                bad_reason = "nonzero _refcount";
@@ -970,8 +970,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                goto out;
        }
        switch (page - head_page) {
-       case 1:
-               /* the first tail page: these may be in place of ->mapping */
+       case 1:         /* the first tail page */
                if (unlikely(folio_large_mapcount(folio))) {
                        bad_page(page, "nonzero large_mapcount");
                        goto out;
@@ -1002,8 +1001,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                        }
                }
                break;
-       case 2:
-               /* the second tail page: deferred_list overlaps ->mapping */
+       case 2:         /* the second tail page */
                if (unlikely(!list_empty(&folio->_deferred_list))) {
                        bad_page(page, "on deferred list");
                        goto out;
@@ -1019,16 +1017,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                        }
                }
                break;
-       case 3:
-               /* the third tail page: hugetlb specifics overlap ->mappings */
-               if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
-                       break;
-               fallthrough;
        default:
-               if (page->mapping != TAIL_MAPPING) {
-                       bad_page(page, "corrupted mapping in tail page");
-                       goto out;
-               }
                break;
        }
        if (unlikely(!PageTail(page))) {
@@ -1041,7 +1030,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
        }
        ret = 0;
 out:
-       page->mapping = NULL;
+       page->__folio_mapping = NULL;
        clear_compound_head(page);
        return ret;
 }
mm/zpdesc.h
index 506eec5d98fe1aacdb7fae422c2253239a94e5c8..fccb173da971e47b5ecdc960f8f0360607c933bb 100644
@@ -50,7 +50,7 @@ struct zpdesc {
 
 ZPDESC_MATCH(flags, flags);
 ZPDESC_MATCH(lru, lru);
-ZPDESC_MATCH(mapping, movable_ops);
+ZPDESC_MATCH(__folio_mapping, movable_ops);
 ZPDESC_MATCH(__folio_index, next);
 ZPDESC_MATCH(__folio_index, handle);
 ZPDESC_MATCH(private, zspage);
tools/include/linux/poison.h
index e530e54046c9bab929df2a6134472c5d05065dee..b4b804af8d43c095205b9a2a3217e76790e4ea73 100644
 /********** mm/page_poison.c **********/
 #define PAGE_POISON 0xaa
 
-/********** mm/page_alloc.c ************/
-
-#define TAIL_MAPPING   ((void *) 0x400 + POISON_POINTER_DELTA)
-
 /********** mm/slab.c **********/
 /*
  * Magic nums for obj red zoning.