www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: page_alloc: set migratetype inside move_freepages()
Author: Zi Yan <ziy@nvidia.com>
Wed, 20 Mar 2024 18:02:13 +0000 (14:02 -0400)
Committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:04 +0000 (20:56 -0700)
This avoids changing migratetype after move_freepages() or
move_freepages_block(), which is error prone.  It also prepares for
upcoming changes to fix move_freepages() not moving free pages partially
in the range.

Link: https://lkml.kernel.org/r/20240320180429.678181-9-hannes@cmpxchg.org
Signed-off-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c
mm/page_isolation.c

index 289dcb43471926c14f71efcda95e040b6b506dbe..fee52ce8ab2d4e5139acc0136f765ca647cb8640 100644 (file)
@@ -1586,9 +1586,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
 #endif
 
 /*
- * Move the free pages in a range to the freelist tail of the requested type.
- * Note that start_page and end_pages are not aligned on a pageblock
- * boundary. If alignment is required, use move_freepages_block()
+ * Change the type of a block and move all its free pages to that
+ * type's freelist.
  */
 static int move_freepages(struct zone *zone, unsigned long start_pfn,
                          unsigned long end_pfn, int migratetype)
@@ -1598,6 +1597,9 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
        unsigned int order;
        int pages_moved = 0;
 
+       VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
+       VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+
        for (pfn = start_pfn; pfn <= end_pfn;) {
                page = pfn_to_page(pfn);
                if (!PageBuddy(page)) {
@@ -1615,6 +1617,8 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
                pages_moved += 1 << order;
        }
 
+       set_pageblock_migratetype(pfn_to_page(start_pfn), migratetype);
+
        return pages_moved;
 }
 
@@ -1842,7 +1846,6 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
        if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
                        page_group_by_mobility_disabled) {
                move_freepages(zone, start_pfn, end_pfn, start_type);
-               set_pageblock_migratetype(page, start_type);
                return __rmqueue_smallest(zone, order, start_type);
        }
 
@@ -1916,12 +1919,10 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
        /* Yoink! */
        mt = get_pageblock_migratetype(page);
        /* Only reserve normal pageblocks (i.e., they can merge with others) */
-       if (migratetype_is_mergeable(mt)) {
-               if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) {
-                       set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
+       if (migratetype_is_mergeable(mt))
+               if (move_freepages_block(zone, page,
+                                        MIGRATE_HIGHATOMIC) != -1)
                        zone->nr_reserved_highatomic += pageblock_nr_pages;
-               }
-       }
 
 out_unlock:
        spin_unlock_irqrestore(&zone->lock, flags);
@@ -2000,7 +2001,6 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                         * not fail on zone boundaries.
                         */
                        WARN_ON_ONCE(ret == -1);
-                       set_pageblock_migratetype(page, ac->migratetype);
                        if (ret > 0) {
                                spin_unlock_irqrestore(&zone->lock, flags);
                                return ret;
@@ -2682,10 +2682,9 @@ int __isolate_free_page(struct page *page, unsigned int order)
                         * Only change normal pageblocks (i.e., they can merge
                         * with others)
                         */
-                       if (migratetype_is_mergeable(mt) &&
-                           move_freepages_block(zone, page,
-                                                MIGRATE_MOVABLE) != -1)
-                               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+                       if (migratetype_is_mergeable(mt))
+                               move_freepages_block(zone, page,
+                                                    MIGRATE_MOVABLE);
                }
        }
 
index 71539d7b96cf909a8738d10a6831362ee668e050..f84f0981b2dfa81d7b76edec97bf7a5be1396405 100644 (file)
@@ -188,7 +188,6 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
                        return -EBUSY;
                }
                __mod_zone_freepage_state(zone, -nr_pages, mt);
-               set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                spin_unlock_irqrestore(&zone->lock, flags);
                return 0;
@@ -262,10 +261,10 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
                 */
                WARN_ON_ONCE(nr_pages == -1);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
-       }
-       set_pageblock_migratetype(page, migratetype);
-       if (isolated_page)
+       } else {
+               set_pageblock_migratetype(page, migratetype);
                __putback_isolated_page(page, order, migratetype);
+       }
        zone->nr_isolate_pageblock--;
 out:
        spin_unlock_irqrestore(&zone->lock, flags);