mm: migrate: convert numamigrate_isolate_page() to numamigrate_isolate_folio()
author	Kefeng Wang <wangkefeng.wang@huawei.com>
	Wed, 13 Sep 2023 09:51:26 +0000 (17:51 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Wed, 4 Oct 2023 17:32:27 +0000 (10:32 -0700)
Rename numamigrate_isolate_page() to numamigrate_isolate_folio(), then
make it take a folio and use the folio API to save compound_head() calls.

Link: https://lkml.kernel.org/r/20230913095131.2426871-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
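
For context on the compound_head() savings: the page-based helpers used
here (put_page(), page_is_file_lru(), isolate_lru_page()) each re-derive
the head page internally via page_folio(), while the folio variants
operate on the folio directly. A minimal sketch of the pattern, with
hypothetical function names for illustration (not part of the patch):

	#include <linux/mm.h>

	/* Before: each page-based call re-resolves the head page. */
	static void page_based(struct page *page)
	{
		put_page(page);		/* does page_folio(page) internally */
	}

	/* After: convert once at the boundary, then stay in folio space. */
	static void folio_based(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* one head lookup */

		folio_put(folio);	/* no further head-page lookups */
	}
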
mm/migrate.c

index 0030703204d3ef4eda32069e8ce14c35fad910d3..1f1aebe8da18fb35a0662feaf457a013bf84e307 100644
@@ -2481,10 +2481,9 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
        return __folio_alloc_node(gfp, order, nid);
 }
 
-static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 {
-       int nr_pages = thp_nr_pages(page);
-       int order = compound_order(page);
+       int nr_pages = folio_nr_pages(folio);
 
        /* Avoid migrating to a node that is nearly full */
        if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
@@ -2496,22 +2495,23 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                        if (managed_zone(pgdat->node_zones + z))
                                break;
                }
-               wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
+               wakeup_kswapd(pgdat->node_zones + z, 0,
+                             folio_order(folio), ZONE_MOVABLE);
                return 0;
        }
 
-       if (!isolate_lru_page(page))
+       if (!folio_isolate_lru(folio))
                return 0;
 
-       mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
+       node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
                            nr_pages);
 
        /*
-        * Isolating the page has taken another reference, so the
-        * caller's reference can be safely dropped without the page
+        * Isolating the folio has taken another reference, so the
+        * caller's reference can be safely dropped without the folio
         * disappearing underneath us during migration.
         */
-       put_page(page);
+       folio_put(folio);
        return 1;
 }
 
@@ -2545,7 +2545,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        if (page_is_file_lru(page) && PageDirty(page))
                goto out;
 
-       isolated = numamigrate_isolate_page(pgdat, page);
+       isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
        if (!isolated)
                goto out;
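
For reference, the caller-side reference-counting contract after this
change, sketched as a hypothetical wrapper (only
numamigrate_isolate_folio() and page_folio() are from the patch; the
wrapper itself is illustrative): the caller enters holding one reference
on the page, and when isolation succeeds that reference has already been
dropped, because folio_isolate_lru() took its own.

	static int try_numa_isolate(pg_data_t *pgdat, struct page *page)
	{
		/* One page->folio conversion at the call boundary. */
		if (!numamigrate_isolate_folio(pgdat, page_folio(page)))
			return 0;	/* not isolated; caller still holds its ref */

		/* Isolated: numamigrate_isolate_folio() consumed the
		 * caller's reference, since folio_isolate_lru() holds its
		 * own, so the folio is safe to hand to migrate_pages(). */
		return 1;
	}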