From: Kefeng Wang
Date: Tue, 14 Oct 2025 11:33:49 +0000 (+0800)
Subject: mm: huge_memory: use folio_skip_prot_numa() for pmd folio
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=550b531346a7e4e7ad31813d0d1d6a6d8c10a06f;p=users%2Fjedix%2Flinux-maple.git

mm: huge_memory: use folio_skip_prot_numa() for pmd folio

Rename prot_numa_skip() to folio_skip_prot_numa(), and drop the local
"ret" variable by returning the result directly instead of using the
goto style.  The folio skip checks for prot_numa are suitable for a PMD
folio too, which helps to avoid unnecessary PMD changes and folio
migration attempts.

Link: https://lkml.kernel.org/r/20251014113349.2618158-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Reviewed-by: Sidhartha Kumar
Cc: Baolin Wang
Cc: Barry Song
Cc: David Hildenbrand
Cc: Dev Jain
Cc: Lance Yang
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Ryan Roberts
Cc: Zi Yan
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 46ed647f85c1..bfb52c564fb3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2477,8 +2477,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #endif
 
 	if (prot_numa) {
-		struct folio *folio;
-		bool toptier;
+		int target_node = NUMA_NO_NODE;
 		/*
 		 * Avoid trapping faults against the zero page. The read-only
 		 * data is likely to be read-cached on the local CPU and
@@ -2490,19 +2489,13 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		folio = pmd_folio(*pmd);
-		toptier = node_is_toptier(folio_nid(folio));
-		/*
-		 * Skip scanning top tier node if normal numa
-		 * balancing is disabled
-		 */
-		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
-		    toptier)
-			goto unlock;
+		/* Get target node for single threaded private VMAs */
+		if (!(vma->vm_flags & VM_SHARED) &&
+		    atomic_read(&vma->vm_mm->mm_users) == 1)
+			target_node = numa_node_id();
 
-		if (folio_use_access_time(folio))
-			folio_xchg_access_time(folio,
-					       jiffies_to_msecs(jiffies));
+		if (folio_skip_prot_numa(pmd_folio(*pmd), vma, target_node))
+			goto unlock;
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm).
diff --git a/mm/internal.h b/mm/internal.h
index a2555be247e5..cbd3d897b16c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1381,6 +1381,8 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
 
 void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
+bool folio_skip_prot_numa(struct folio *folio, struct vm_area_struct *vma,
+			  int target_node);
 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
 		       unsigned long addr, int *flags, bool writable,
 		       int *last_cpupid);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c998cd226c71..c090bc063a31 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,26 +118,21 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
 }
 
-static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
-			   struct folio *folio)
+bool folio_skip_prot_numa(struct folio *folio, struct vm_area_struct *vma,
+			  int target_node)
 {
-	bool ret = true;
-	bool toptier;
 	int nid;
 
-	if (!folio)
-		goto skip;
-
-	if (folio_is_zone_device(folio) || folio_test_ksm(folio))
-		goto skip;
+	if (!folio || folio_is_zone_device(folio) || folio_test_ksm(folio))
+		return true;
 
 	/* Also skip shared copy-on-write folios */
 	if (is_cow_mapping(vma->vm_flags) && folio_maybe_mapped_shared(folio))
-		goto skip;
+		return true;
 
 	/* Folios are pinned and can't be migrated */
 	if (folio_maybe_dma_pinned(folio))
-		goto skip;
+		return true;
 
 	/*
 	 * While migration can move some dirty pages,
@@ -145,7 +140,7 @@ static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
 	 * context.
 	 */
 	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
-		goto skip;
+		return true;
 
 	/*
 	 * Don't mess with PTEs if page is already on the node
@@ -153,23 +148,20 @@ static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
 	 */
 	nid = folio_nid(folio);
 	if (target_node == nid)
-		goto skip;
-
-	toptier = node_is_toptier(nid);
+		return true;
 
 	/*
 	 * Skip scanning top tier node if normal numa
 	 * balancing is disabled
 	 */
-	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
-		goto skip;
+	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
+	    node_is_toptier(nid))
+		return true;
 
-	ret = false;
 	if (folio_use_access_time(folio))
 		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
 
-skip:
-	return ret;
+	return false;
 }
 
 /* Set nr_ptes number of ptes, starting from idx */
@@ -314,7 +306,8 @@ static long change_pte_range(struct mmu_gather *tlb,
 			 * Avoid trapping faults against the zero or KSM
 			 * pages. See similar comment in change_huge_pmd.
 			 */
-			if (prot_numa && prot_numa_skip(vma, target_node, folio)) {
+			if (prot_numa && folio_skip_prot_numa(folio, vma,
+							      target_node)) {
 				/* determine batch to skip */
 				nr_ptes = mprotect_folio_pte_batch(folio, pte,
 						oldpte, max_nr_ptes, /* flags = */ 0);
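
After this change, the PMD path in change_huge_pmd() and the PTE path in
change_pte_range() funnel through the same helper.  A minimal sketch of
the two call sites as they read after the patch (reconstructed from the
hunks above with surrounding context abridged; not an authoritative
listing):

	/* mm/huge_memory.c: change_huge_pmd(), PMD-mapped folio */
	if (prot_numa) {
		int target_node = NUMA_NO_NODE;

		/* PMD is already PROT_NONE, nothing to do */
		if (pmd_protnone(*pmd))
			goto unlock;

		/* Get target node for single threaded private VMAs */
		if (!(vma->vm_flags & VM_SHARED) &&
		    atomic_read(&vma->vm_mm->mm_users) == 1)
			target_node = numa_node_id();

		/*
		 * Shared skip policy: zone-device/KSM, shared CoW,
		 * pinned and dirty file folios, same-node placement,
		 * and top-tier nodes when normal balancing is off.
		 */
		if (folio_skip_prot_numa(pmd_folio(*pmd), vma, target_node))
			goto unlock;
	}

	/* mm/mprotect.c: change_pte_range(), base-page PTEs */
	if (prot_numa && folio_skip_prot_numa(folio, vma, target_node)) {
		/* determine batch to skip */
		nr_ptes = mprotect_folio_pte_batch(folio, pte,
				oldpte, max_nr_ptes, /* flags = */ 0);
		/* ... skip this batch ... */
	}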