mm-vmscan-make-sure-wakeup_kswapd-with-managed-zone-v2
author    Wei Yang <richard.weiyang@gmail.com>
          Thu, 14 Apr 2022 19:16:54 +0000 (12:16 -0700)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
          Thu, 14 Apr 2022 21:49:52 +0000 (17:49 -0400)
Adjust the usage in migrate_balanced_pgdat() as well.

Link: https://lkml.kernel.org/r/20220329010901.1654-2-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/migrate.c b/mm/migrate.c
index 6598a07df18267df791e7b5f5c51dc5654fd0ca1..96d06b2be5fc229a24c402b1289d625c11763034 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1981,7 +1981,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Returns true if this is a safe migration target node for misplaced NUMA
- * pages. Currently it only checks the watermarks which crude
+ * pages. Currently it only checks the watermarks which is crude.
  */
 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
                                   unsigned long nr_migrate_pages)
@@ -1991,7 +1991,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
        for (z = pgdat->nr_zones - 1; z >= 0; z--) {
                struct zone *zone = pgdat->node_zones + z;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                /* Avoid waking kswapd by allocating pages_to_migrate pages. */
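
For context, the two helpers differ in which zones they accept: a zone can be
populated (present_pages != 0) while every one of its pages is reserved and
therefore never handed out by the buddy allocator, in which case its
watermarks say nothing useful about whether it can absorb migrated pages.
Below is a minimal sketch of the helpers, paraphrased from
include/linux/mmzone.h of roughly this era; the exact definitions may differ
across kernel versions, so consult the tree for the authoritative code.

/*
 * Sketch, paraphrased from include/linux/mmzone.h (v5.18-era):
 * populated_zone() is true when the zone has any present pages,
 * including bootmem-reserved pages the buddy allocator never manages;
 * managed_zone() is true only when the buddy allocator actually
 * manages pages in the zone.
 */
static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

/* Returns true if the zone has memory present */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

/* Returns true if the zone has pages managed by the buddy allocator */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

Using managed_zone() here means migrate_balanced_pgdat() skips zones whose
pages are all reserved, instead of evaluating watermarks against memory that
can never be allocated from.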