#endif
if (prot_numa) {
- struct folio *folio;
- bool toptier;
+ int target_node = NUMA_NO_NODE;
/*
* Avoid trapping faults against the zero page. The read-only
* data is likely to be read-cached on the local CPU and
if (pmd_protnone(*pmd))
goto unlock;
- folio = pmd_folio(*pmd);
- toptier = node_is_toptier(folio_nid(folio));
- /*
- * Skip scanning top tier node if normal numa
- * balancing is disabled
- */
- if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
- toptier)
- goto unlock;
+ /* Get target node for single threaded private VMAs */
+ if (!(vma->vm_flags & VM_SHARED) &&
+ atomic_read(&vma->vm_mm->mm_users) == 1)
+ target_node = numa_node_id();
- if (folio_use_access_time(folio))
- folio_xchg_access_time(folio,
- jiffies_to_msecs(jiffies));
+ if (folio_skip_prot_numa(pmd_folio(*pmd), vma, target_node))
+ goto unlock;
}
/*
* In case prot_numa, we are under mmap_read_lock(mm). It's critical
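
Spelled out, the heuristic added above says: for a private mapping whose mm has a single user, a NUMA hinting fault can only be taken by the one task that owns the mapping, so the local node is the only migration target worth recording. A minimal standalone sketch of that decision (pick_target_node(), vm_shared, mm_users and local_node are illustrative stand-ins for the kernel's vma->vm_flags & VM_SHARED, mm->mm_users and numa_node_id(); -1 stands in for NUMA_NO_NODE):

/* Illustrative sketch only, not kernel code. */
static int pick_target_node(int vm_shared, int mm_users, int local_node)
{
	/*
	 * A private VMA in a single-user mm can only be touched by the
	 * current task, so the local node is the only useful target.
	 */
	if (!vm_shared && mm_users == 1)
		return local_node;

	return -1;	/* stand-in for NUMA_NO_NODE: no preference known */
}
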
void __vunmap_range_noflush(unsigned long start, unsigned long end);
+bool folio_skip_prot_numa(struct folio *folio, struct vm_area_struct *vma,
+ int target_node);
int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
unsigned long addr, int *flags, bool writable,
int *last_cpupid);
return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
}
-static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
- struct folio *folio)
+bool folio_skip_prot_numa(struct folio *folio, struct vm_area_struct *vma,
+ int target_node)
{
- bool ret = true;
- bool toptier;
int nid;
- if (!folio)
- goto skip;
-
- if (folio_is_zone_device(folio) || folio_test_ksm(folio))
- goto skip;
+ if (!folio || folio_is_zone_device(folio) || folio_test_ksm(folio))
+ return true;
/* Also skip shared copy-on-write folios */
if (is_cow_mapping(vma->vm_flags) && folio_maybe_mapped_shared(folio))
- goto skip;
+ return true;
/* Folios are pinned and can't be migrated */
if (folio_maybe_dma_pinned(folio))
- goto skip;
+ return true;
/*
 * While migration can move some dirty pages,
 * it cannot move them all from MIGRATE_ASYNC
 * context.
*/
if (folio_is_file_lru(folio) && folio_test_dirty(folio))
- goto skip;
+ return true;
/*
 * Don't mess with PTEs if page is already on the node
 * the protection needs to be changed towards.
*/
nid = folio_nid(folio);
if (target_node == nid)
- goto skip;
-
- toptier = node_is_toptier(nid);
+ return true;
/*
* Skip scanning top tier node if normal numa
* balancing is disabled
*/
- if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
- goto skip;
+ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
+ node_is_toptier(nid))
+ return true;
- ret = false;
if (folio_use_access_time(folio))
folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
-skip:
- return ret;
+ return false;
}
/* Set nr_ptes number of ptes, starting from idx */
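
The goto-based structure being removed and the early returns replacing it are behaviour-preserving: with the shared ret variable gone, each skip exit becomes return true and the fall-through path becomes return false, leaving the access-time update unambiguously on the "do not skip" path. A sketch of the equivalence, using a hypothetical predicate:

#include <stdbool.h>

/* Before: single exit through a shared flag and label. */
static bool skip_goto_style(int x)
{
	bool ret = true;

	if (x < 0)
		goto skip;

	ret = false;
skip:
	return ret;
}

/* After: the same decision with early returns. */
static bool skip_early_return_style(int x)
{
	if (x < 0)
		return true;

	return false;
}
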
* Avoid trapping faults against the zero or KSM
* pages. See similar comment in change_huge_pmd.
*/
-	if (prot_numa && prot_numa_skip(vma, target_node, folio)) {
+	if (prot_numa && folio_skip_prot_numa(folio, vma,
+					      target_node)) {
/* determine batch to skip */
nr_ptes = mprotect_folio_pte_batch(folio,
pte, oldpte, max_nr_ptes, /* flags = */ 0);
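
One detail worth noting at this call site: the logical && is load-bearing. folio_skip_prot_numa() has a side effect (the folio_xchg_access_time() update), so it must not run when prot_numa is false; && short-circuits, while a bitwise & would evaluate both operands unconditionally. A small runnable userspace illustration of the difference:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for folio_skip_prot_numa(): returns a verdict and, like the
 * real helper's access-time update, has a side effect. */
static bool check_with_side_effect(void)
{
	puts("side effect ran");
	return true;
}

int main(void)
{
	bool prot_numa = false;

	if (prot_numa & check_with_side_effect())	/* '&': both operands evaluated */
		puts("skipped (bitwise)");

	if (prot_numa && check_with_side_effect())	/* '&&': short-circuits */
		puts("skipped (logical)");

	return 0;
}

With prot_numa false, only the '&' form prints "side effect ran"; the '&&' form never calls the helper.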