This reverts commit a3b837130b58 ("mm/page_table_check: remove unused
parameter in [__]page_table_check_pmd_set").

Reinstate the previously unused address parameter for the purpose of
supporting powerpc platforms, as many do not encode user/kernel ownership
of the page in the pte, but instead in the address of the access.

Apply this to __page_table_check_pmds_set(), page_table_check_pmds_set(),
and the page_table_check_pmd_set() wrapper macro.
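
For illustration only (a hypothetical sketch, not part of this patch): on
powerpc the kernel/user split is a property of the effective address rather
than of a pte bit, so an address-based classifier along the following lines
is what the reinstated addr argument makes possible. is_kernel_addr() is the
existing powerpc helper; pmd_maps_user_page() is made up for the example.

	/*
	 * Hypothetical sketch: classify a mapping as user or kernel by the
	 * effective address being written, as powerpc does, rather than by
	 * inspecting pte/pmd bits.
	 */
	static bool pmd_maps_user_page(unsigned long addr)
	{
		return !is_kernel_addr(addr);
	}
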
[ajd@linux.ibm.com: rebase on arm64 + riscv changes, update commit message]
Link: https://lkml.kernel.org/r/20250813062614.51759-5-ajd@linux.ibm.com
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Ingo Molnar <mingo@kernel.org> [x86]
Acked-by: Alexandre Ghiti <alexghiti@rivosinc.com> [riscv]
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Cc: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 		page_table_check_ptes_set(mm, ptep, pte, nr);
 		break;
 	case PMD_SIZE:
-		page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr);
+		page_table_check_pmds_set(mm, addr, (pmd_t *)ptep,
+					  pte_pmd(pte), nr);
 		break;
 #ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SIZE:
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
-	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
 	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
 }
 #endif
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
-	page_table_check_pmd_set(mm, pmdp, pmd);
+	page_table_check_pmd_set(mm, addr, pmdp, pmd);
 	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
 }
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
-	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
 	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
 }
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-	page_table_check_pmd_set(mm, pmdp, pmd);
+	page_table_check_pmd_set(mm, addr, pmdp, pmd);
 	set_pmd(pmdp, pmd);
 }
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
-	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
 	if (IS_ENABLED(CONFIG_SMP)) {
 		return xchg(pmdp, pmd);
 	} else {
 void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
 void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
 		unsigned int nr);
-void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
-		unsigned int nr);
+void __page_table_check_pmds_set(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd, unsigned int nr);
 void __page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
 		pud_t *pudp, pud_t pud, unsigned int nr);
 void __page_table_check_pte_clear_range(struct mm_struct *mm,
 }
 
 static inline void page_table_check_pmds_set(struct mm_struct *mm,
-		pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+		unsigned long addr, pmd_t *pmdp, pmd_t pmd, unsigned int nr)
 {
 	if (static_branch_likely(&page_table_check_disabled))
 		return;
 
-	__page_table_check_pmds_set(mm, pmdp, pmd, nr);
+	__page_table_check_pmds_set(mm, addr, pmdp, pmd, nr);
 }
 
 static inline void page_table_check_puds_set(struct mm_struct *mm,
 }
 
 static inline void page_table_check_pmds_set(struct mm_struct *mm,
-		pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+		unsigned long addr, pmd_t *pmdp, pmd_t pmd, unsigned int nr)
 {
 }
 
 #endif /* CONFIG_PAGE_TABLE_CHECK */
 
-#define page_table_check_pmd_set(mm, pmdp, pmd)	page_table_check_pmds_set(mm, pmdp, pmd, 1)
+#define page_table_check_pmd_set(mm, addr, pmdp, pmd)	page_table_check_pmds_set(mm, addr, pmdp, pmd, 1)
 #define page_table_check_pud_set(mm, addr, pudp, pud)	page_table_check_puds_set(mm, addr, pudp, pud, 1)
 
 #endif /* __LINUX_PAGE_TABLE_CHECK_H */
 		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
 }
 
-void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
-		unsigned int nr)
+void __page_table_check_pmds_set(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd, unsigned int nr)
 {
 	unsigned long stride = PMD_SIZE >> PAGE_SHIFT;
 	unsigned int i;