From: Andrew Donnellan
Date: Wed, 13 Aug 2025 06:26:02 +0000 (+1000)
Subject: arm64/mm: add addr parameter to __set_ptes_anysz()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=9ab49a280140e2604e507305d71785112be20ee5;p=users%2Fjedix%2Flinux-maple.git

arm64/mm: add addr parameter to __set_ptes_anysz()

Patch series "Support page table check on PowerPC", v16.

Support page table check on all PowerPC platforms.  This works by
serialising assignments, reassignments and clears of page table entries
at each level in order to ensure that anonymous mappings have at most
one writable consumer, and likewise that file-backed mappings are not
simultaneously also anonymous mappings.

In order to support this infrastructure, a number of stubs must be
defined for all powerpc platforms.  Additionally, separate set_pte_at()
and set_pte_at_unchecked(), to allow for internal, uninstrumented
mappings.

On some PowerPC platforms, implementing
{pte,pmd,pud}_user_accessible_page() requires the address.  We revert
previous changes that removed the address parameter from various
interfaces, and add it to some other interfaces, in order to allow this.

(This series was initially written by Rohan McLure, who has left IBM and
is no longer working on powerpc.)

This patch (of 13):

To provide support for page table check on powerpc, we need to reinstate
the address parameter in several functions, including
page_table_check_{ptes,pmds,puds}_set().

In preparation for this, add the addr parameter to arm64's
__set_ptes_anysz() and change its callsites accordingly.

Link: https://lkml.kernel.org/r/20250813062614.51759-1-ajd@linux.ibm.com
Link: https://lkml.kernel.org/r/20250813062614.51759-2-ajd@linux.ibm.com
Signed-off-by: Andrew Donnellan
Cc: Christophe Leroy
Cc: Nicholas Miehlbradt
Cc: Pasha Tatashin
Cc: Sweet Tea Dorminy
Cc: Ingo Molnar
Cc: Madhavan Srinivasan
Cc: Rohan McLure
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Alexandre Ghiti
Signed-off-by: Andrew Morton
---

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index abd2dee416b3..ed644be48d87 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -698,8 +698,8 @@ static inline pgprot_t pud_pgprot(pud_t pud)
 	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
 }
 
-static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
-				    pte_t pte, unsigned int nr,
+static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep, pte_t pte, unsigned int nr,
 				    unsigned long pgsize)
 {
 	unsigned long stride = pgsize >> PAGE_SHIFT;
@@ -734,26 +734,23 @@ static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
 	__set_pte_complete(pte);
 }
 
-static inline void __set_ptes(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
+static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte, unsigned int nr)
 {
-	__set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
+	__set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
 }
 
-static inline void __set_pmds(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
+static inline void __set_pmds(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd, unsigned int nr)
 {
-	__set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
+	__set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
 }
 #define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
 
-static inline void __set_puds(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
+static inline void __set_puds(struct mm_struct *mm, unsigned long addr,
 			      pud_t *pudp, pud_t pud, unsigned int nr)
 {
-	__set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
+	__set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
 }
 #define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
 
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 1d90a7e75333..1003b5020752 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -225,8 +225,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	ncontig = num_contig_ptes(sz, &pgsize);
 
 	if (!pte_present(pte)) {
-		for (i = 0; i < ncontig; i++, ptep++)
-			__set_ptes_anysz(mm, ptep, pte, 1, pgsize);
+		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
+			__set_ptes_anysz(mm, addr, ptep, pte, 1, pgsize);
 		return;
 	}
 
@@ -234,7 +234,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
 		clear_flush(mm, addr, ptep, pgsize, ncontig);
 
-	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+	__set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -449,7 +449,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	if (pte_young(orig_pte))
 		pte = pte_mkyoung(pte);
 
-	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+	__set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
 	return 1;
 }
 
@@ -473,7 +473,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 	pte = pte_wrprotect(pte);
 
-	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+	__set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
 }
 
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
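
The arm64 change above is purely mechanical: addr is threaded through but not
yet used. The cover letter explains why the address matters at all: on some
powerpc platforms, {pte,pmd,pud}_user_accessible_page() cannot be implemented
from the PTE alone, because the user/kernel distinction depends on the
effective address. The snippet below is an illustrative sketch only, not part
of this patch or necessarily the series' eventual code; the function body and
its use of the existing powerpc helper is_kernel_addr() are assumptions made
for illustration.

	/*
	 * Illustrative sketch only (not from this series): a powerpc-style
	 * user_accessible_page() check that needs both the PTE and the
	 * address, which is why the address parameter is being reinstated
	 * in the set_ptes()/page_table_check_*_set() paths.
	 */
	static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
	{
		/* present mapping at a user (non-kernel) effective address */
		return pte_present(pte) && !is_kernel_addr(addr);
	}

With a check of that shape, page table check instrumentation called from
set_ptes() and friends must receive the address being mapped, which is what
this arm64 preparation (and the later page_table_check_{ptes,pmds,puds}_set()
changes the commit message refers to) makes possible.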