mm: memory_hotplug: remove head variable in do_migrate_range()
author Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 27 Aug 2024 11:47:24 +0000 (19:47 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 4 Sep 2024 04:15:58 +0000 (21:15 -0700)
Patch series "mm: memory_hotplug: improve do_migrate_range()", v3.

Unify hwpoisoned page handling and the isolation of HugeTLB/LRU/non-LRU
movable pages, and convert do_migrate_range() to use folios.

This patch (of 5):

Directly use a folio for HugeTLB and THP when calculating the next pfn,
then remove the now-unused head variable.
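
For illustration only, a minimal user-space sketch of the pfn-skip pattern the
hunk below adopts: advance pfn to the last page of a large folio so the outer
loop does not revisit its tail pages.  nr_pages() and FOLIO_ORDER here are
made-up stand-ins for folio_nr_pages() and the real folio size, not kernel APIs.

	/* Not kernel code: models the skip logic only. */
	#include <stdio.h>

	#define FOLIO_ORDER 9	/* pretend large folios are 512 pages */

	/* Hypothetical stand-in for folio_nr_pages(page_folio(pfn_to_page(pfn))). */
	static unsigned long nr_pages(unsigned long pfn)
	{
		/* pretend every 512-aligned pfn starts a large folio */
		return (pfn % (1UL << FOLIO_ORDER)) == 0 ? (1UL << FOLIO_ORDER) : 1;
	}

	int main(void)
	{
		unsigned long start_pfn = 0, end_pfn = 2048;

		for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn++) {
			unsigned long n = nr_pages(pfn);

			if (n > 1)		/* "folio_test_large()" */
				pfn += n - 1;	/* skip the folio's tail pages */

			printf("visited pfn %lu (folio of %lu pages)\n",
			       pfn - (n - 1), n);
		}
		return 0;
	}

The kernel hunk does the same thing with pfn = folio_pfn(folio) +
folio_nr_pages(folio) - 1, which also covers HugeTLB and so makes the
separate head-based computations unnecessary.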

Link: https://lkml.kernel.org/r/20240827114728.3212578-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240827114728.3212578-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index df291f2e509d8938a749329218b117db0ac7567e..3f8a109d51293b33bb23259aff582c024c76208a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1773,7 +1773,7 @@ found:
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
-       struct page *page, *head;
+       struct page *page;
        LIST_HEAD(source);
        static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
@@ -1786,14 +1786,20 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        continue;
                page = pfn_to_page(pfn);
                folio = page_folio(page);
-               head = &folio->page;
 
-               if (PageHuge(page)) {
-                       pfn = page_to_pfn(head) + compound_nr(head) - 1;
-                       isolate_hugetlb(folio, &source);
-                       continue;
-               } else if (PageTransHuge(page))
-                       pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
+               /*
+                * No reference or lock is held on the folio, so it might
+                * be modified concurrently (e.g. split).  As such,
+                * folio_nr_pages() may read garbage.  This is fine as the outer
+                * loop will revisit the split folio later.
+                */
+               if (folio_test_large(folio)) {
+                       pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
+                       if (folio_test_hugetlb(folio)) {
+                               isolate_hugetlb(folio, &source);
+                               continue;
+                       }
+               }
 
                /*
                 * HWPoison pages have elevated reference counts so the migration would