mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
author		Kefeng Wang <wangkefeng.wang@huawei.com>
		Tue, 27 Aug 2024 11:47:28 +0000 (19:47 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Wed, 4 Sep 2024 04:15:59 +0000 (21:15 -0700)
Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU movable folio
isolation, which cleans up the code a bit and saves a few calls to
compound_head().
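
For reference, isolate_folio_to_list() is introduced by an earlier patch in
this series. A rough sketch of the helper as it lands in mm/migrate.c,
reconstructed here from the description above rather than quoted from the
series, might look like:

	/*
	 * Sketch of the unified isolation helper; an approximation,
	 * not the verbatim upstream code.
	 */
	bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
	{
		bool isolated, lru;

		/* hugetlb folios have their own isolation path */
		if (folio_test_hugetlb(folio))
			return isolate_hugetlb(folio, list);

		/* otherwise the folio is on the LRU or is non-LRU movable */
		lru = !__folio_test_movable(folio);
		if (lru)
			isolated = folio_isolate_lru(folio);
		else
			isolated = isolate_movable_page(&folio->page,
							ISOLATE_UNEVICTABLE);
		if (!isolated)
			return false;

		list_add(&folio->lru, list);
		if (lru)
			node_stat_add_folio(folio, NR_ISOLATED_ANON +
					    folio_is_file_lru(folio));
		return true;
	}

With the hugetlb/LRU/non-LRU checks folded into one helper,
do_migrate_range() below only needs to take a reference on the folio and
hand it to the helper, as the diff shows.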

[wangkefeng.wang@huawei.com: various fixes]
Link: https://lkml.kernel.org/r/20240829150500.2599549-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240827114728.3212578-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6bdac4cddd9f7040c9ea1d79a8aa836f6e19123e..4c6e3b1812046301cb4931d8c1025e07a25371a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1772,15 +1772,14 @@ found:
 
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
+       struct folio *folio;
        unsigned long pfn;
-       struct page *page;
        LIST_HEAD(source);
        static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-               struct folio *folio;
-               bool isolated;
+               struct page *page;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -1811,34 +1810,21 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        continue;
                }
 
-               if (folio_test_hugetlb(folio)) {
-                       isolate_hugetlb(folio, &source);
+               if (!folio_try_get(folio))
                        continue;
-               }
 
-               if (!get_page_unless_zero(page))
-                       continue;
-               /*
-                * We can skip free pages. And we can deal with pages on
-                * LRU and non-lru movable pages.
-                */
-               if (PageLRU(page))
-                       isolated = isolate_lru_page(page);
-               else
-                       isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-               if (isolated) {
-                       list_add_tail(&page->lru, &source);
-                       if (!__PageMovable(page))
-                               inc_node_page_state(page, NR_ISOLATED_ANON +
-                                                   page_is_file_lru(page));
+               if (unlikely(page_folio(page) != folio))
+                       goto put_folio;
 
-               } else {
+               if (!isolate_folio_to_list(folio, &source)) {
                        if (__ratelimit(&migrate_rs)) {
-                               pr_warn("failed to isolate pfn %lx\n", pfn);
+                               pr_warn("failed to isolate pfn %lx\n",
+                                       page_to_pfn(page));
                                dump_page(page, "isolation failed");
                        }
                }
-               put_page(page);
+put_folio:
+               folio_put(folio);
        }
        if (!list_empty(&source)) {
                nodemask_t nmask = node_states[N_MEMORY];
@@ -1853,7 +1839,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                 * We have checked that migration range is on a single zone so
                 * we can use the nid of the first page to all the others.
                 */
-               mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+               mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
 
                /*
                 * try to allocate from a different node but reuse this node
@@ -1866,11 +1852,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                ret = migrate_pages(&source, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
                if (ret) {
-                       list_for_each_entry(page, &source, lru) {
+                       list_for_each_entry(folio, &source, lru) {
                                if (__ratelimit(&migrate_rs)) {
                                        pr_warn("migrating pfn %lx failed ret:%d\n",
-                                               page_to_pfn(page), ret);
-                                       dump_page(page, "migration failure");
+                                               folio_pfn(folio), ret);
+                                       dump_page(&folio->page,
+                                                 "migration failure");
                                }
                        }
                        putback_movable_pages(&source);