]> www.infradead.org Git - nvme.git/commitdiff
mm/compaction: merge end_pfn boundary check in isolate_freepages_range
authorKemeng Shi <shikemeng@huawei.com>
Thu, 3 Aug 2023 09:48:59 +0000 (17:48 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 20:37:39 +0000 (13:37 -0700)
Merge the end_pfn boundary checks for the single-pageblock-forward and
multiple-pageblock-forward cases, so the boundary check is no longer done
twice when moving forward by multiple page blocks.

Link: https://lkml.kernel.org/r/20230803094901.2915942-3-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/compaction.c

index 6e0c7456026b5500adb7163fa986b6310a79c236..d32929f39dc472f80a09d90c2de601c74604b979 100644 (file)
@@ -740,8 +740,6 @@ isolate_freepages_range(struct compact_control *cc,
                /* Protect pfn from changing by isolate_freepages_block */
                unsigned long isolate_start_pfn = pfn;
 
-               block_end_pfn = min(block_end_pfn, end_pfn);
-
                /*
                 * pfn could pass the block_end_pfn if isolated freepage
                 * is more than pageblock order. In this case, we adjust
@@ -750,9 +748,10 @@ isolate_freepages_range(struct compact_control *cc,
                if (pfn >= block_end_pfn) {
                        block_start_pfn = pageblock_start_pfn(pfn);
                        block_end_pfn = pageblock_end_pfn(pfn);
-                       block_end_pfn = min(block_end_pfn, end_pfn);
                }
 
+               block_end_pfn = min(block_end_pfn, end_pfn);
+
                if (!pageblock_pfn_to_page(block_start_pfn,
                                        block_end_pfn, cc->zone))
                        break;