In the new set_ptes() API, set_pte_at() (a special case of set_ptes()) is
intended to be instrumented by the page table check facility. There are,
however, several other routines that constitute the API for setting page
table entries, including set_pmd_at() among others. Such routines are
themselves implemented in terms of set_pte_at().

A future patch providing support for page table checking on powerpc must
take care to avoid duplicate calls to page_table_check_p{te,md,ud}_set().
Allow for assignment of pte entries without instrumentation through the
set_pte_at_unchecked() routine introduced in this patch.

Cause API-facing routines that call set_pte_at() to instead call
set_pte_at_unchecked(), which will remain uninstrumented by page table
check. set_ptes() is itself implemented in terms of __set_pte_at(), so
basing set_pte_at_unchecked() on the same helper eliminates redundant
code.

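As a sketch of the intended layering (illustrative only, not part of this
patch: the hook name follows include/linux/page_table_check.h, and its
placement is ultimately up to the future enablement patch), set_pmd_at()
would then instrument the update exactly once and delegate the store to
the unchecked helper:

  void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                  pmd_t *pmdp, pmd_t pmd)
  {
          /* Instrument the pmd-level update exactly once here ... */
          page_table_check_pmd_set(mm, pmdp, pmd);

          /*
           * ... then install the entry via the uninstrumented helper,
           * so the pte-level hook does not also fire for the same
           * update.
           */
          return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
  }
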
[ajd@linux.ibm.com: don't change to unchecked for early boot/kernel mappings]
Link: https://lkml.kernel.org/r/20250813062614.51759-13-ajd@linux.ibm.com
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Acked-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte, unsigned int nr);
#define set_ptes set_ptes
+void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
WARN_ON(!(pmd_leaf(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
- return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
+ return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
void set_pud_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pud_leaf(pud)));
#endif
trace_hugepage_set_pud(addr, pud_val(pud));
- return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
+ return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}
static void do_serialize(void *arg)
if (radix_enabled())
return radix__ptep_modify_prot_commit(vma, addr,
ptep, old_pte, pte);
- set_pte_at(vma->vm_mm, addr, ptep, pte);
+ set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
(atomic_read(&mm->context.copros) > 0))
radix__flush_tlb_page(vma, addr);
- set_pte_at(mm, addr, ptep, pte);
+ set_pte_at_unchecked(mm, addr, ptep, pte);
}
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
if (!radix_enabled())
return 0;
- set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+ set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pud);
return 1;
}
if (!radix_enabled())
return 0;
- set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+ set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pmd);
return 1;
}
}
}
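+/*
+ * Set a pte without instrumentation by the page table check facility,
+ * for internal callers that perform their own (or no) accounting.
+ */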
+void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
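+ /*
+ * Make sure hardware valid bit is not set. We don't do
+ * tlb flush for this update.
+ */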
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+ pte = set_pte_filter(pte, addr);
+ __set_pte_at(mm, addr, ptep, pte, 0);
+}
+
void unmap_kernel_page(unsigned long va)
{
pmd_t *pmdp = pmd_off_k(va);