www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
powerpc: mm: support page table check
authorRohan McLure <rmclure@linux.ibm.com>
Wed, 13 Aug 2025 06:26:14 +0000 (16:26 +1000)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 18 Aug 2025 05:08:57 +0000 (22:08 -0700)
On creation and clearing of a page table mapping, instrument such calls by
invoking page_table_check_pte_set and page_table_check_pte_clear
respectively.  These calls serve as a sanity check against illegal
mappings.

Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all platforms.

See also:

riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
arm64 in commit 42b2547137f5 ("arm64/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
check")

[ajd@linux.ibm.com: rebase]
Link: https://lkml.kernel.org/r/20250813062614.51759-14-ajd@linux.ibm.com
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Cc: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/mm/book3s64/hash_pgtable.c
arch/powerpc/mm/book3s64/pgtable.c
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/mm/pgtable.c

index 93402a1d9c9fc63e2795cf53c5acb248e0358591..09d192ee3f91935ea19acf497309ec0fb6007548 100644 (file)
@@ -170,6 +170,7 @@ config PPC
        select ARCH_STACKWALK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_DEBUG_PAGEALLOC    if PPC_BOOK3S || PPC_8xx
+       select ARCH_SUPPORTS_PAGE_TABLE_CHECK
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
        select ARCH_USE_MEMTEST
index b225967f85ea14871560ffbc75131f1376ddc0c4..68864a71ca5f97120915919fda10ae8849e356d2 100644 (file)
@@ -202,6 +202,7 @@ void unmap_kernel_page(unsigned long va);
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
+#include <linux/page_table_check.h>
 
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS                0
@@ -315,7 +316,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
 {
-       return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+       pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+
+       page_table_check_pte_clear(mm, addr, old_pte);
+
+       return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
index 48f3a41317dd75d3cc1e287df507603af99a7373..81c220bcbd26793fbb758aeee1dd940793863885 100644 (file)
 #define PAGE_KERNEL_ROX        __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
 /*
  * page table defines
  */
@@ -416,8 +418,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
-       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
-       return __pte(old);
+       pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+       page_table_check_pte_clear(mm, addr, old_pte);
+
+       return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
@@ -426,11 +431,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            pte_t *ptep, int full)
 {
        if (full && radix_enabled()) {
+               pte_t old_pte;
+
                /*
                 * We know that this is a full mm pte clear and
                 * hence can be sure there is no parallel set_pte.
                 */
-               return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+               old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+               page_table_check_pte_clear(mm, addr, old_pte);
+
+               return old_pte;
        }
        return ptep_get_and_clear(mm, addr, ptep);
 }
@@ -1289,19 +1299,34 @@ extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
 {
-       if (radix_enabled())
-               return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
-       return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+       pmd_t old_pmd;
+
+       if (radix_enabled()) {
+               old_pmd = radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
+       } else {
+               old_pmd = hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+       }
+
+       page_table_check_pmd_clear(mm, addr, old_pmd);
+
+       return old_pmd;
 }
 
 #define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
 static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pud_t *pudp)
 {
-       if (radix_enabled())
-               return radix__pudp_huge_get_and_clear(mm, addr, pudp);
-       BUG();
-       return *pudp;
+       pud_t old_pud;
+
+       if (radix_enabled()) {
+               old_pud = radix__pudp_huge_get_and_clear(mm, addr, pudp);
+       } else {
+               BUG();
+       }
+
+       page_table_check_pud_clear(mm, addr, old_pud);
+
+       return old_pud;
 }
 
 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
index a8bc4f24beb154929e58a8cb1f90e611f77c7790..3a6630dca6156b99740bc139953a885d361a5ffc 100644 (file)
@@ -29,6 +29,8 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 
 #ifndef __ASSEMBLY__
 
+#include <linux/page_table_check.h>
+
 extern int icache_44x_need_flush;
 
 #ifndef pte_huge_size
@@ -122,7 +124,11 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
 {
-       return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+       pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+       page_table_check_pte_clear(mm, addr, old_pte);
+
+       return old_pte;
 }
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 
index 82d31177630b810a70c9e7797f1f163fa97ccfaf..ac2a24d15d2e3733bd013eb168b2656ad61e7af7 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/mm.h>
+#include <linux/page_table_check.h>
 #include <linux/stop_machine.h>
 
 #include <asm/sections.h>
@@ -230,6 +231,9 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 
        pmd = *pmdp;
        pmd_clear(pmdp);
+
+       page_table_check_pmd_clear(vma->vm_mm, address, pmd);
+
        /*
         * Wait for all pending hash_page to finish. This is needed
         * in case of subpage collapse. When we collapse normal pages
index ff0c5a1988f81a9748c58722e16e6758befca94e..8be06a3cfcbc3dd93cb1fd1bdfd5486db09579cc 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/pkeys.h>
 #include <linux/debugfs.h>
 #include <linux/proc_fs.h>
+#include <linux/page_table_check.h>
 
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -127,6 +128,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        WARN_ON(!(pmd_leaf(pmd)));
 #endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
+       page_table_check_pmd_set(mm, addr, pmdp, pmd);
        return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
@@ -144,6 +146,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
        WARN_ON(!(pud_leaf(pud)));
 #endif
        trace_hugepage_set_pud(addr, pud_val(pud));
+       page_table_check_pud_set(mm, addr, pudp, pud);
        return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud));
 }
 
@@ -179,12 +182,14 @@ void serialize_against_pte_lookup(struct mm_struct *mm)
 pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
-       unsigned long old_pmd;
+       pmd_t old_pmd;
 
        VM_WARN_ON_ONCE(!pmd_present(*pmdp));
-       old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
+       old_pmd = __pmd(pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
-       return __pmd(old_pmd);
+       page_table_check_pmd_clear(vma->vm_mm, address, old_pmd);
+
+       return old_pmd;
 }
 
 pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
index 8c3b0f1750e48101383f929c55f26b7d1c4f3849..00b178d41e0439734e3be32d6a65ae1170704896 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/mm.h>
+#include <linux/page_table_check.h>
 #include <linux/hugetlb.h>
 #include <linux/string_helpers.h>
 #include <linux/memory.h>
@@ -1474,6 +1475,8 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
        pmd = *pmdp;
        pmd_clear(pmdp);
 
+       page_table_check_pmd_clear(vma->vm_mm, address, pmd);
+
        radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
 
        return pmd;
index bf90becb6cd6c3b2fa68816db872512bb7c4537c..42aa850693af75f670d87e0941121eb5d3c54ee1 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/page_table_check.h>
 #include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -206,6 +207,9 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
         * and not hw_valid ptes. Hence there is no translation cache flush
         * involved that need to be batched.
         */
+
+       page_table_check_ptes_set(mm, addr, ptep, pte, nr);
+
        for (;;) {
 
                /*