mm: record the migration reason for struct migration_target_control
author Baolin Wang <baolin.wang@linux.alibaba.com>
Wed, 6 Mar 2024 10:13:26 +0000 (18:13 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:06 +0000 (20:56 -0700)
Patch series "make the hugetlb migration strategy consistent", v2.

As discussed in previous thread [1], there is an inconsistency when
handling hugetlb migration.  When handling the migration of freed hugetlb,
it prevents fallback to other NUMA nodes in
alloc_and_dissolve_hugetlb_folio().  However, when dealing with in-use
hugetlb, it allows fallback to other NUMA nodes in
alloc_hugetlb_folio_nodemask(), which can break the per-node hugetlb pool
and might result in unexpected failures when node-bound workloads don't get
what they assume is available.
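
For illustration, a minimal sketch of the two allocation policies being
contrasted (the helper names here are hypothetical; only htlb_alloc_mask()
and __GFP_THISNODE are the kernel's):

	/* Replacing a free hugetlb folio: pinned to the source node. */
	static gfp_t free_hugetlb_gfp(struct hstate *h)
	{
		return htlb_alloc_mask(h) | __GFP_THISNODE; /* no fallback */
	}

	/* Migrating an in-use hugetlb folio: the node is only a preference. */
	static gfp_t inuse_hugetlb_gfp(struct hstate *h)
	{
		return htlb_alloc_mask(h); /* may fall back via the nodemask */
	}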

This patchset tries to make the hugetlb migration strategy clearer and
more consistent. Please find details in each patch.

[1]
https://lore.kernel.org/all/6f26ce22d2fcd523418a085f2c588fe0776d46e7.1706794035.git.baolin.wang@linux.alibaba.com/

This patch (of 2):

To support different hugetlb allocation strategies during hugetlb migration
based on various migration reasons, record the migration reason in the
migration_target_control structure as a preparatory step.
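
As a sketch of how the recorded reason is meant to be consumed (the actual
policy lands in the next patch of the series; the helper and reason set
below are hypothetical):

	/* Hypothetical: decide whether a hugetlb migration target may
	 * spill onto other NUMA nodes, keyed off the recorded reason. */
	static bool hugetlb_may_fallback(enum migrate_reason reason)
	{
		/* e.g. reasons that must succeed somewhere, where breaking
		 * the per-node hugetlb pool is the lesser evil */
		return reason == MR_MEMORY_HOTPLUG ||
		       reason == MR_MEMORY_FAILURE;
	}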

Link: https://lkml.kernel.org/r/cover.1709719720.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/7b95d4981e07211f57139fc5b1f7ce91b920cee4.1709719720.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/gup.c
mm/internal.h
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/page_alloc.c
mm/vmscan.c

index 8433d3dc31fce19210e71894e9edd92096fb6b48..6d8d15f8c7f9af41561109403fd1c5914f01bc7f 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2145,6 +2145,7 @@ static int migrate_longterm_unpinnable_pages(
                struct migration_target_control mtc = {
                        .nid = NUMA_NO_NODE,
                        .gfp_mask = GFP_USER | __GFP_NOWARN,
+                       .reason = MR_LONGTERM_PIN,
                };
 
                if (migrate_pages(movable_page_list, alloc_migration_target,
index fb219e31f0f0b758c79061c1b54125ce5074755c..63bdac6d04130364532bda7380b765c1f5b92895 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1045,6 +1045,7 @@ struct migration_target_control {
        int nid;                /* preferred node id */
        nodemask_t *nmask;
        gfp_t gfp_mask;
+       enum migrate_reason reason;     /* why the folios are migrated */
 };
 
 /*
index 0a7a8a4ba421d507b2d0263b791b7d45e86e6f3a..9e50586f2e37e0e0d393a5d3c256c66141f5d445 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2669,6 +2669,7 @@ static int soft_offline_in_use_page(struct page *page)
        struct migration_target_control mtc = {
                .nid = NUMA_NO_NODE,
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+               .reason = MR_MEMORY_FAILURE,
        };
 
        if (!huge && folio_test_large(folio)) {
index a444e2d7dd2bffa3ffe496830a76395f21e7767e..b79ba36e09e03496df38b7f939c583a3e661ce44 100644 (file)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1841,6 +1841,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                struct migration_target_control mtc = {
                        .nmask = &nmask,
                        .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+                       .reason = MR_MEMORY_HOTPLUG,
                };
                int ret;
 
index 0b3def99174a174046c9dc503aa1b584240d3ecc..e128e6b7bbcbebfe97b0b0c490516ab7e1d0b754 100644 (file)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1070,6 +1070,7 @@ static long migrate_to_node(struct mm_struct *mm, int source, int dest,
        struct migration_target_control mtc = {
                .nid = dest,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+               .reason = MR_SYSCALL,
        };
 
        nodes_clear(nmask);
index 73a052a382f13a21bd72e23fb5996ae07c3022d3..bde63010a3cf94b021caafef31803672d14ac78a 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2060,6 +2060,7 @@ static int do_move_pages_to_node(struct list_head *pagelist, int node)
        struct migration_target_control mtc = {
                .nid = node,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+               .reason = MR_SYSCALL,
        };
 
        err = migrate_pages(pagelist, alloc_migration_target, NULL,
index 382d1c98f8e5284906fc7b80dab4f6d4a7a79d22..daab8cab91ccf29ab59bd04195c3d439d4b961d1 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6351,6 +6351,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
        struct migration_target_control mtc = {
                .nid = zone_to_nid(cc->zone),
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+               .reason = MR_CONTIG_RANGE,
        };
        struct page *page;
        unsigned long total_mapped = 0;
index 3ef654addd44c26f999c84ba82a10f005804ece4..289121e76753f46984a5f8d1cfad196417394167 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -967,7 +967,8 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
                .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
                        __GFP_NOMEMALLOC | GFP_NOWAIT,
                .nid = target_nid,
-               .nmask = &allowed_mask
+               .nmask = &allowed_mask,
+               .reason = MR_DEMOTION,
        };
 
        if (list_empty(demote_folios))