	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
}

-static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
-			   pte_t oldpte, pte_t *pte, int target_node,
-			   struct folio *folio)
+static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
+			   struct folio *folio)
{
	bool ret = true;
	bool toptier;
	int nid;

-	/* Avoid TLB flush if possible */
-	if (pte_protnone(oldpte))
-		goto skip;
-
	if (!folio)
		goto skip;
@@ ... @@ change_pte_range
			struct page *page;
			pte_t ptent;
+
+			/* Already in the desired state. */
+			if (prot_numa && pte_protnone(oldpte))
+				continue;
+
			page = vm_normal_page(vma, addr, oldpte);
			if (page)
				folio = page_folio(page);
+
			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
-			if (prot_numa) {
-				int ret = prot_numa_skip(vma, addr, oldpte, pte,
-							 target_node, folio);
-				if (ret) {
-
-					/* determine batch to skip */
-					nr_ptes = mprotect_folio_pte_batch(folio,
-						  pte, oldpte, max_nr_ptes, /* flags = */ 0);
-					continue;
-				}
+			if (prot_numa && prot_numa_skip(vma, target_node, folio)) {
+				/* determine batch to skip */
+				nr_ptes = mprotect_folio_pte_batch(folio,
+					  pte, oldpte, max_nr_ptes, /* flags = */ 0);
+				continue;
			}
			nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
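
The `&&` in the new condition is load-bearing: prot_numa_skip() must only run
during NUMA-hinting passes, and `&&` short-circuits where `&` would evaluate
both operands unconditionally. Below is a minimal standalone sketch of just
that control flow; every type and helper in it is a toy stand-in for
illustration (the skip "policy" here is invented, not the kernel's):

/*
 * Toy userspace model of the patched loop body; compile with any C compiler.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio;			/* opaque stand-in for the kernel type */

static int skip_calls;		/* counts how often the helper runs */

static bool prot_numa_skip(struct folio *folio, int target_node)
{
	skip_calls++;
	return folio == NULL;	/* toy rule: skip when there is no folio */
}

static void loop_body(bool prot_numa, struct folio *folio)
{
	/* '&&' short-circuits: the helper only runs on NUMA passes. */
	if (prot_numa && prot_numa_skip(folio, /* target_node */ 0)) {
		puts("batch skipped");
		return;
	}
	puts("protection changed");
}

int main(void)
{
	loop_body(false, NULL);	/* plain mprotect() pass */
	loop_body(true, NULL);	/* NUMA pass, nothing to do */
	printf("helper ran %d time(s)\n", skip_calls);	/* prints 1, not 2 */
	return 0;
}

Had the condition used `&`, the first call above would still have evaluated
prot_numa_skip(), i.e. the helper would run even on non-NUMA passes.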