ARM: 9166/1: Support KFENCE for ARM
author Wang Kefeng <wangkefeng.wang@huawei.com>
Fri, 3 Dec 2021 09:26:33 +0000 (10:26 +0100)
committer Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Fri, 17 Dec 2021 11:34:38 +0000 (11:34 +0000)
Add architecture-specific implementation details for KFENCE and enable
KFENCE on ARM. In particular, this implements the required interface in
<asm/kfence.h>.
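
For readers new to the KFENCE arch hooks, the contract behind <asm/kfence.h>
boils down to the two helpers sketched here. The names and signatures are
taken from the new header added further down in this patch; the comments are
editorial summaries, not kernel documentation.

/*
 * Editorial sketch of the per-architecture contract behind <asm/kfence.h>;
 * the real ARM implementation is the new header added below.
 */
#include <linux/types.h>

/* Prepare the __kfence_pool mapping once during init; every pool page must
 * remain individually remappable afterwards.  Returning false leaves KFENCE
 * disabled. */
static inline bool arch_kfence_init_pool(void);

/* Change the protection of a single pool page: protect == true makes the
 * page inaccessible so that any access faults and is reported,
 * protect == false makes it accessible again. */
static inline bool kfence_protect_page(unsigned long addr, bool protect);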

KFENCE requires that attributes for pages from its memory pool can be set
individually. Therefore, force the KFENCE pool to be mapped at page
granularity.
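
As a rough, standalone illustration of how little actually has to be split
(assuming the default CONFIG_KFENCE_NUM_OBJECTS=255, 4 KiB pages, the 2 MiB
per-PMD layout 32-bit ARM uses both with and without LPAE, and the pool-size
formula from include/linux/kfence.h; none of these values are read from this
patch):

#include <stdio.h>

int main(void)
{
	/* Assumed constants, see above; adjust if your configuration differs. */
	unsigned long page_size   = 4096;                /* 4 KiB pages */
	unsigned long num_objects = 255;                 /* CONFIG_KFENCE_NUM_OBJECTS default */
	unsigned long pmd_size    = 2UL * 1024 * 1024;   /* 2 MiB per PMD entry */

	/* KFENCE_POOL_SIZE = (num_objects + 1) * 2 * PAGE_SIZE */
	unsigned long pool_size = (num_objects + 1) * 2 * page_size;

	printf("pool size             : %lu KiB\n", pool_size >> 10);             /* 2048 */
	printf("PMD entries overlapped: at most %lu\n", pool_size / pmd_size + 1); /* 2 */
	printf("PTEs per split PMD    : %lu\n", pmd_size / page_size);            /* 512 */
	return 0;
}

Under those assumptions the pool overlaps at most a couple of section
mappings, and each split installs PTRS_PER_PTE page table entries, which is
what split_pmd_page() in the new header below does.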

This patch was tested using the test cases in kfence_test.c; all of them
passed both with and without ARM_LPAE.
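
For anyone who wants to reproduce that run, a plausible configuration
fragment is sketched below (CONFIG_KFENCE_KUNIT_TEST is the option that
builds kfence_test.c; treat the exact symbols and dependencies as an
assumption to check against your tree rather than a verified recipe):

# illustrative .config fragment - verify against mm/kfence/Kconfig
CONFIG_KUNIT=y
CONFIG_KFENCE=y
CONFIG_KFENCE_KUNIT_TEST=y
# repeat the boot/test cycle with CONFIG_ARM_LPAE=y and with it disabled
# to cover both page-table formats mentioned above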

Acked-by: Marco Elver <elver@google.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
arch/arm/Kconfig
arch/arm/include/asm/kfence.h [new file with mode: 0644]
arch/arm/mm/fault.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f0f9e8bec83acfa6ca3108664f8f815e0715bd34..321b0a1c282013ac2236cc77840d689b1e12368e 100644
@@ -69,6 +69,7 @@ config ARM
        select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+       select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
        select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h
new file mode 100644 (file)
index 0000000..7980d0f
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARM_KFENCE_H
+#define __ASM_ARM_KFENCE_H
+
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/set_memory.h>
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+       int i;
+       unsigned long pfn = PFN_DOWN(__pa(addr));
+       pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+       if (!pte)
+               return -ENOMEM;
+
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
+       pmd_populate_kernel(&init_mm, pmd, pte);
+
+       flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+       return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+       unsigned long addr;
+       pmd_t *pmd;
+
+       for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+            addr += PAGE_SIZE) {
+               pmd = pmd_off_k(addr);
+
+               if (pmd_leaf(*pmd)) {
+                       if (split_pmd_page(pmd, addr & PMD_MASK))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       set_memory_valid(addr, 1, !protect);
+
+       return true;
+}
+
+#endif /* __ASM_ARM_KFENCE_H */
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 1d41e8fad4f31095347a1147f7046e877818b91c..a1cebe363ed5067db04707814c8c8b237d73d3dd 100644
@@ -17,6 +17,7 @@
 #include <linux/sched/debug.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/kfence.h>
 
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
@@ -136,10 +137,14 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
-       if (addr < PAGE_SIZE)
+       if (addr < PAGE_SIZE) {
                msg = "NULL pointer dereference";
-       else
+       } else {
+               if (kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
+                       return;
+
                msg = "paging request";
+       }
 
        die_kernel_fault(msg, mm, addr, fsr, regs);
 }