From: Kefeng Wang
Date: Fri, 16 Aug 2024 09:04:35 +0000 (+0800)
Subject: mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=02356fb808cf4d3786f69db094e4f6b4e8b368e7;p=users%2Fjedix%2Flinux-maple.git

mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation

Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio isolation,
which cleans up the code a bit and saves a few calls to compound_head().

Link: https://lkml.kernel.org/r/20240816090435.888946-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Cc: David Hildenbrand
Cc: Miaohe Lin
Cc: Naoya Horiguchi
Cc: Oscar Salvador
Cc: Kalesh Singh
Cc: Shaohua Li
Cc: Suren Baghdasaryan
Cc: Yu Zhao
Signed-off-by: Andrew Morton
---

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 02a0d4fbc3fe..cc9c16db2f8c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1773,14 +1773,14 @@ found:
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
-	struct page *page;
 	LIST_HEAD(source);
+	struct folio *folio;
 	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		struct folio *folio;
-		bool isolated;
+		struct page *page;
+		bool huge;
 
 		if (!pfn_valid(pfn))
 			continue;
@@ -1812,34 +1812,22 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			continue;
 		}
 
-		if (folio_test_hugetlb(folio)) {
-			isolate_hugetlb(folio, &source);
-			continue;
+		huge = folio_test_hugetlb(folio);
+		if (!huge) {
+			folio = folio_get_nontail_page(page);
+			if (!folio)
+				continue;
 		}
 
-		if (!get_page_unless_zero(page))
-			continue;
-		/*
-		 * We can skip free pages. And we can deal with pages on
-		 * LRU and non-lru movable pages.
-		 */
-		if (PageLRU(page))
-			isolated = isolate_lru_page(page);
-		else
-			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-		if (isolated) {
-			list_add_tail(&page->lru, &source);
-			if (!__PageMovable(page))
-				inc_node_page_state(page, NR_ISOLATED_ANON +
-						    page_is_file_lru(page));
-
-		} else {
+		if (!isolate_folio_to_list(folio, &source)) {
 			if (__ratelimit(&migrate_rs)) {
 				pr_warn("failed to isolate pfn %lx\n", pfn);
 				dump_page(page, "isolation failed");
 			}
 		}
-		put_page(page);
+
+		if (!huge)
+			folio_put(folio);
 	}
 	if (!list_empty(&source)) {
 		nodemask_t nmask = node_states[N_MEMORY];
@@ -1854,7 +1842,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * We have checked that migration range is on a single zone so
 		 * we can use the nid of the first page to all the others.
 		 */
-		mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+		mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
 
 		/*
 		 * try to allocate from a different node but reuse this node
@@ -1867,11 +1855,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		ret = migrate_pages(&source, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
 		if (ret) {
-			list_for_each_entry(page, &source, lru) {
+			list_for_each_entry(folio, &source, lru) {
 				if (__ratelimit(&migrate_rs)) {
 					pr_warn("migrating pfn %lx failed ret:%d\n",
-						page_to_pfn(page), ret);
-					dump_page(page, "migration failure");
+						folio_pfn(folio), ret);
+					dump_page(&folio->page,
+						  "migration failure");
 				}
 			}
 			putback_movable_pages(&source);
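
For reference, below is a minimal sketch of what isolate_folio_to_list()
plausibly looks like, reconstructed from the open-coded paths removed
above. The helper itself is introduced elsewhere (this is patch 6 of the
series, per the Link), so the exact body, the folio_isolate_lru() /
node_stat_add_folio() calls, and the list_add_tail() ordering are
assumptions rather than the upstream definition:

/*
 * Hedged sketch only: mirrors the page-based logic removed from
 * do_migrate_range() above. Returns true if @folio was isolated
 * and added to @list.
 */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	/* hugetlb has its own isolation path and takes its own reference */
	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	/* LRU folios vs. non-LRU movable folios (balloon, zsmalloc, ...) */
	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);
	if (!isolated)
		return false;

	list_add_tail(&folio->lru, list);
	/* only LRU folios are accounted in NR_ISOLATED_ANON/_FILE */
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;
}

Note the refcount asymmetry the caller relies on: isolate_hugetlb()
acquires its own reference on success, which is why do_migrate_range()
wraps only the non-hugetlb case in folio_get_nontail_page()/folio_put().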