mm: introduce PageUnaccepted() page type
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Fri, 9 Aug 2024 11:48:50 +0000 (14:48 +0300)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 17 Aug 2024 00:53:06 +0000 (17:53 -0700)
The new page type allows physical memory scanners to detect unaccepted
memory and handle it accordingly.

The page type is serialized with the zone lock.
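
For illustration only (not part of the commit): a minimal sketch of how a physical
memory scanner might use the new page type. The walker function below is
hypothetical; it only relies on PageUnaccepted() and on zone->lock, which is what
serializes the type.

/*
 * Hypothetical PFN walker, not taken from this patch: skip unaccepted
 * memory instead of touching it.  Only the first page of each MAX_ORDER
 * chunk on zone->unaccepted_pages carries the type, and it is only
 * stable while zone->lock is held (a real scanner would drop the lock
 * periodically).
 */
static void scan_zone_pages(struct zone *zone)
{
	unsigned long pfn, flags;

	spin_lock_irqsave(&zone->lock, flags);
	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;

		if (PageUnaccepted(page)) {
			/* The whole MAX_ORDER chunk is still unaccepted. */
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* ... inspect the accepted page here ... */
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}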

Link: https://lkml.kernel.org/r/20240809114854.3745464-5-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/page_alloc.c

index a0a29bd092f8f7aed6124f10f65eebde9385be5e..17175a328e6b4e69cb2b66c9bc0b02a44db92690 100644 (file)
@@ -939,6 +939,7 @@ enum pagetype {
        PG_hugetlb      = 0x04000000,
        PG_slab         = 0x02000000,
        PG_zsmalloc     = 0x01000000,
+       PG_unaccepted   = 0x00800000,
 
        PAGE_TYPE_BASE  = 0x80000000,
 
@@ -1072,6 +1073,13 @@ FOLIO_TEST_FLAG_FALSE(hugetlb)
 
 PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
 
+/*
+ * Mark pages that have to be accepted before they are touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
  * @page: The page to test.
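
Editorial note, not part of the diff: the PAGE_TYPE_OPS() invocation above is what
generates the accessors used by the rest of the patch. A rough sketch of the
resulting interface, inferred from the call sites in this commit (the exact
expansion lives earlier in include/linux/page-flags.h):

/*
 * Accessors stamped out by PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted):
 *
 *   PageUnaccepted(page)        - test whether the page carries the type
 *   __SetPageUnaccepted(page)   - non-atomic set; caller provides locking
 *                                 (here: zone->lock)
 *   __ClearPageUnaccepted(page) - non-atomic clear; caller provides locking
 *
 * If PAGE_TYPE_OPS() also expands FOLIO_TYPE_OPS(), folio_test_unaccepted()
 * and friends exist too, but this patch only uses the page-level helpers.
 */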
index 32209083d8a70d0c95fcc38082ff0b4374a4ab56..d8bc99e3c78c41f8a981c621fda3b2861d8b280d 100644 (file)
@@ -6965,6 +6965,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
 
        account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
        __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+       __ClearPageUnaccepted(page);
        spin_unlock_irqrestore(&zone->lock, flags);
 
        accept_page(page, MAX_PAGE_ORDER);
@@ -7023,6 +7024,7 @@ static bool __free_unaccepted(struct page *page)
        list_add_tail(&page->lru, &zone->unaccepted_pages);
        account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
        __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+       __SetPageUnaccepted(page);
        spin_unlock_irqrestore(&zone->lock, flags);
 
        if (first)
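
Editorial follow-up, not in the patch: both update sites run under zone->lock, so
the page type stays in sync with membership on zone->unaccepted_pages and with the
NR_UNACCEPTED counter. A hypothetical debug check built on that invariant:

/*
 * Hypothetical consistency check, not from this patch: every chunk queued
 * on zone->unaccepted_pages has been marked with __SetPageUnaccepted(),
 * and the type is cleared before the chunk is handed to accept_page().
 */
static void check_unaccepted_list(struct zone *zone)
{
	struct page *page;

	lockdep_assert_held(&zone->lock);
	list_for_each_entry(page, &zone->unaccepted_pages, lru)
		VM_BUG_ON_PAGE(!PageUnaccepted(page), page);
}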