mm: add a helper to accept page
Author:     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
AuthorDate: Fri, 9 Aug 2024 11:48:52 +0000 (14:48 +0300)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Sat, 17 Aug 2024 00:53:07 +0000 (17:53 -0700)
Accept a given struct page and add it to the free list.

The helper is useful for physical memory scanners that want to use free
unaccepted memory.
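
A minimal sketch of the intended call pattern (the scanner below is
hypothetical, for illustration only; accept_page() is the helper added
by this patch):

	/* Hypothetical mm-internal scanner walking a PFN range. */
	static void scan_pfn_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);

			/*
			 * No-op unless the page is PageUnaccepted();
			 * otherwise the page is removed from
			 * zone->unaccepted_pages and accepted.
			 */
			accept_page(page);
		}
	}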

Link: https://lkml.kernel.org/r/20240809114854.3745464-7-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/page_alloc.c

diff --git a/mm/internal.h b/mm/internal.h
index a2e4a6b66e9b80057b994c48a37a1b358c32c6b4..cbe4849f6e73c30d162457133e2a6bdaa58b3fb4 100644
@@ -1437,4 +1437,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
        unsigned long new_addr, unsigned long len,
        bool need_rmap_locks, bool for_stack);
 
+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
+
 #endif /* __MM_INTERNAL_H */
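
Note that the !CONFIG_UNACCEPTED_MEMORY stub is an empty static inline,
so callers may invoke accept_page() unconditionally; the call compiles
away when unaccepted memory support is not built in.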
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e7fccd5eb6d08b7d3b7c399b503ce5187f8a5a36..bfd23c21c159bded56dfcc7544e89cdba41e1fc8 100644
@@ -6937,27 +6937,18 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
        return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
 }
 
-static bool try_to_accept_memory_one(struct zone *zone)
+static void __accept_page(struct zone *zone, unsigned long *flags,
+                         struct page *page)
 {
-       unsigned long flags;
-       struct page *page;
        bool last;
 
-       spin_lock_irqsave(&zone->lock, flags);
-       page = list_first_entry_or_null(&zone->unaccepted_pages,
-                                       struct page, lru);
-       if (!page) {
-               spin_unlock_irqrestore(&zone->lock, flags);
-               return false;
-       }
-
        list_del(&page->lru);
        last = list_empty(&zone->unaccepted_pages);
 
        account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
        __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
        __ClearPageUnaccepted(page);
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock_irqrestore(&zone->lock, *flags);
 
        accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
 
@@ -6965,6 +6956,38 @@ static bool try_to_accept_memory_one(struct zone *zone)
 
        if (last)
                static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       unsigned long flags;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       if (!PageUnaccepted(page)) {
+               spin_unlock_irqrestore(&zone->lock, flags);
+               return;
+       }
+
+       /* Unlocks zone->lock */
+       __accept_page(zone, &flags, page);
+}
+
+static bool try_to_accept_memory_one(struct zone *zone)
+{
+       unsigned long flags;
+       struct page *page;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       page = list_first_entry_or_null(&zone->unaccepted_pages,
+                                       struct page, lru);
+       if (!page) {
+               spin_unlock_irqrestore(&zone->lock, flags);
+               return false;
+       }
+
+       /* Unlocks zone->lock */
+       __accept_page(zone, &flags, page);
 
        return true;
 }
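
After this refactor, __accept_page() has an asymmetric locking contract:
it must be entered with zone->lock held (taken with spin_lock_irqsave())
and it releases the lock itself through the caller-provided flags
pointer, so the expensive accept_memory() call runs outside the
spinlock. Both callers follow the same pattern; a sketch of the
contract:

	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	/* ... find or check an unaccepted page under zone->lock ... */

	/* Unlocks zone->lock */
	__accept_page(zone, &flags, page);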