www.infradead.org Git - users/dwmw2/linux.git/commitdiff
mm: Use for_each_valid_pfn() in memory_hotplug
author: David Woodhouse <dwmw@amazon.co.uk>
Fri, 4 Apr 2025 14:00:25 +0000 (15:00 +0100)
committer: David Woodhouse <dwmw@amazon.co.uk>
Fri, 25 Apr 2025 22:57:53 +0000 (23:57 +0100)
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
mm/memory_hotplug.c

index 8305483de38bb78f1f05e9aa6c329624d6f53b5c..b1caedbade5b166363c9926d98adb98d6e711528 100644 (file)
@@ -1756,12 +1756,10 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 {
        unsigned long pfn;
 
-       for (pfn = start; pfn < end; pfn++) {
+       for_each_valid_pfn(pfn, start, end) {
                struct page *page;
                struct folio *folio;
 
-               if (!pfn_valid(pfn))
-                       continue;
                page = pfn_to_page(pfn);
                if (PageLRU(page))
                        goto found;
@@ -1805,11 +1803,9 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
        static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
 
-       for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+       for_each_valid_pfn(pfn, start_pfn, end_pfn) {
                struct page *page;
 
-               if (!pfn_valid(pfn))
-                       continue;
                page = pfn_to_page(pfn);
                folio = page_folio(page);