mm/compaction: fix the range to pageblock_pfn_to_page()
author    Wei Yang <richard.weiyang@gmail.com>
          Thu, 2 Oct 2025 03:31:40 +0000 (03:31 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
          Wed, 15 Oct 2025 04:28:34 +0000 (21:28 -0700)
The function pageblock_pfn_to_page() must confirm that the target range is
contained entirely within the current zone.
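For reference, this containment check lives in __pageblock_pfn_to_page()
(mm/page_alloc.c).  Paraphrased, and modulo per-version details, it looks
roughly like:

	struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
					     unsigned long end_pfn, struct zone *zone)
	{
		struct page *start_page, *end_page;

		/* end_pfn is one past the range being checked */
		end_pfn--;

		if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
			return NULL;

		start_page = pfn_to_page(start_pfn);
		if (page_zone(start_page) != zone)
			return NULL;

		end_page = pfn_to_page(end_pfn);
		if (page_zonenum(start_page) != page_zonenum(end_page))
			return NULL;

		return start_page;
	}

If either end of the range falls outside the given zone, the caller gets
NULL and is expected to skip the block.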

Originally, when pageblock_pfn_to_page() was introduced by commit
7d49d8868336, it operated on a single range, [pfn, block_end_pfn], for
both range checking and isolation.

However, commit e1409c325fdc ("mm/compaction: pass only pageblock aligned
range to pageblock_pfn_to_page") changed this behavior: the caller now
operates on two different ranges:

  - [block_start_pfn, block_end_pfn] is used to check whether the range
    lies within a single zone;

  - [pfn, block_end_pfn] is used for the actual page isolation.

This split logic fails when start_pfn < zone_start_pfn, even if both lie
within the same pageblock.  In this scenario, block_start_pfn is clamped
up to zone_start_pfn before the check, so the checked range
[block_start_pfn, block_end_pfn] misses the pages in
[start_pfn, zone_start_pfn).

         start_pfn     zone_start_pfn
    +----+-------------+-------------------+
    block_start_pfn                        block_end_pfn

This oversight allows the range check to pass, even though the isolation
step ([pfn, block_end_pfn]) may attempt to isolate pages belonging to two
different zones.
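To make the arithmetic concrete, here is a toy user-space model of the
pre-fix setup; every value (and the pageblock size) is invented purely for
illustration:

	#include <stdio.h>

	#define PAGEBLOCK_NR_PAGES		512UL
	#define pageblock_start_pfn(pfn)	((pfn) & ~(PAGEBLOCK_NR_PAGES - 1))
	#define pageblock_end_pfn(pfn)		(pageblock_start_pfn(pfn) + PAGEBLOCK_NR_PAGES)

	int main(void)
	{
		unsigned long zone_start_pfn = 0x100080; /* zone begins mid-pageblock */
		unsigned long start_pfn      = 0x100040; /* caller's pfn, below the zone */

		/* Pre-fix: only the checking range is clamped to the zone start. */
		unsigned long block_start_pfn = pageblock_start_pfn(start_pfn); /* 0x100000 */
		if (block_start_pfn < zone_start_pfn)
			block_start_pfn = zone_start_pfn;                        /* 0x100080 */
		unsigned long block_end_pfn = pageblock_end_pfn(start_pfn);     /* 0x100200 */

		printf("zone-checked: [%#lx, %#lx)\n", block_start_pfn, block_end_pfn);
		printf("isolated    : [%#lx, %#lx)\n", start_pfn, block_end_pfn);
		/*
		 * The 0x40 pfns in [0x100040, 0x100080) are isolated without
		 * ever being zone-checked -- they may belong to the previous
		 * zone.
		 */
		return 0;
	}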

To fix this, we should revert to using the same range ([block_start_pfn,
block_end_pfn]) for both checking and isolation in each iteration.  A
pageblock that straddles the zone boundary then fails the zone check and is
skipped before any isolation is attempted.

Link: https://lkml.kernel.org/r/20251002033140.24462-3-richard.weiyang@gmail.com
Fixes: e1409c325fdc ("mm/compaction: pass only pageblock aligned range to pageblock_pfn_to_page")
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/compaction.c

diff --git a/mm/compaction.c b/mm/compaction.c
index 1e8f8eca318c6844c27682677a0a9ea552316828..8760d10bd0b327410440cee8dfc3af1966b54cc1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1320,27 +1320,22 @@ int
 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                                                        unsigned long end_pfn)
 {
-       unsigned long pfn, block_start_pfn, block_end_pfn;
+       unsigned long block_start_pfn, block_end_pfn;
        int ret = 0;
 
        /* Scan block by block. First and last block may be incomplete */
-       pfn = start_pfn;
-       block_start_pfn = pageblock_start_pfn(pfn);
-       if (block_start_pfn < cc->zone->zone_start_pfn)
-               block_start_pfn = cc->zone->zone_start_pfn;
-       block_end_pfn = pageblock_end_pfn(pfn);
+       block_start_pfn = start_pfn;
+       block_end_pfn = pageblock_end_pfn(start_pfn);
 
-       for (; pfn < end_pfn; pfn = block_end_pfn,
-                               block_start_pfn = block_end_pfn,
+       for (; block_start_pfn < end_pfn; block_start_pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {
 
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               if (!pageblock_pfn_to_page(block_start_pfn,
-                                       block_end_pfn, cc->zone))
+               if (!pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone))
                        continue;
 
-               ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
+               ret = isolate_migratepages_block(cc, block_start_pfn, block_end_pfn,
                                                 ISOLATE_UNEVICTABLE);
 
                if (ret)
@@ -2046,7 +2041,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 {
        unsigned long block_start_pfn;
        unsigned long block_end_pfn;
-       unsigned long low_pfn;
        struct page *page;
        const isolate_mode_t isolate_mode =
                (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
@@ -2058,20 +2052,17 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
         * initialized by compact_zone(). The first failure will use
         * the lowest PFN as the starting point for linear scanning.
         */
-       low_pfn = fast_find_migrateblock(cc);
-       block_start_pfn = pageblock_start_pfn(low_pfn);
-       if (block_start_pfn < cc->zone->zone_start_pfn)
-               block_start_pfn = cc->zone->zone_start_pfn;
+       block_start_pfn = fast_find_migrateblock(cc);
 
        /*
         * fast_find_migrateblock() has already ensured the pageblock is not
         * set with a skipped flag, so to avoid the isolation_suitable check
         * below again, check whether the fast search was successful.
         */
-       fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
+       fast_find_block = block_start_pfn != cc->migrate_pfn && !cc->fast_search_fail;
 
        /* Only scan within a pageblock boundary */
-       block_end_pfn = pageblock_end_pfn(low_pfn);
+       block_end_pfn = pageblock_end_pfn(block_start_pfn);
 
        /*
         * Iterate over whole pageblocks until we find the first suitable.
@@ -2079,7 +2070,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
         */
        for (; block_end_pfn <= cc->free_pfn;
                        fast_find_block = false,
-                       cc->migrate_pfn = low_pfn = block_end_pfn,
+                       cc->migrate_pfn = block_end_pfn,
                        block_start_pfn = block_end_pfn,
                        block_end_pfn += pageblock_nr_pages) {
 
@@ -2088,7 +2079,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
                 * many pageblocks unsuitable, so periodically check if we
                 * need to schedule.
                 */
-               if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
+               if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
                        cond_resched();
 
                page = pageblock_pfn_to_page(block_start_pfn,
@@ -2109,8 +2100,8 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
                 * before making it "skip" so other compaction instances do
                 * not scan the same block.
                 */
-               if ((pageblock_aligned(low_pfn) ||
-                    low_pfn == cc->zone->zone_start_pfn) &&
+               if ((pageblock_aligned(block_start_pfn) ||
+                    block_start_pfn == cc->zone->zone_start_pfn) &&
                    !fast_find_block && !isolation_suitable(cc, page))
                        continue;
 
@@ -2128,7 +2119,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
                }
 
                /* Perform the isolation */
-               if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
+               if (isolate_migratepages_block(cc, block_start_pfn, block_end_pfn,
                                                isolate_mode))
                        return ISOLATE_ABORT;