From: Anshuman Khandual
Date: Thu, 14 Apr 2022 06:07:06 +0000 (-0700)
Subject: powerpc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=f985ca9744eb0c17c36a3d16fb09b4d2e3dc3193;p=users%2Fjedix%2Flinux-maple.git

powerpc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT

This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT.  While here, this also localizes
arch_vm_get_page_prot() as powerpc_vm_get_page_prot() and moves it near
vm_get_page_prot().

Link: https://lkml.kernel.org/r/20220407103251.1209606-3-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Cc: Michael Ellerman
Cc: Paul Mackerras
Cc: Catalin Marinas
Cc: Christoph Hellwig
Cc: "David S. Miller"
Cc: Ingo Molnar
Cc: Khalid Aziz
Cc: Thomas Gleixner
Cc: Will Deacon
Signed-off-by: Andrew Morton
---

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 174edabb74fa..eb9b6ddbf92f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_KEEP_MEMBLOCK
 	select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7cb6d18f5cd6..1b024e64c8ec 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -24,18 +24,6 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-#ifdef CONFIG_PPC_MEM_KEYS
-	return (vm_flags & VM_SAO) ?
-		__pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
-		__pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
-#else
-	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
-#endif
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
 static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
 {
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index c475cf810aa8..cd17bd6fa36b 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -254,3 +254,29 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
+
+#ifdef CONFIG_PPC64
+static pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef CONFIG_PPC_MEM_KEYS
+	return (vm_flags & VM_SAO) ?
+		__pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+		__pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
+}
+#else
+static pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(0);
+}
+#endif /* CONFIG_PPC64 */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(pgprot_val(protection_map[vm_flags &
+			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+			pgprot_val(powerpc_vm_get_page_prot(vm_flags)));
+}
+EXPORT_SYMBOL(vm_get_page_prot);
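
[Editor's note, not part of the patch] For readers unfamiliar with the mechanism, the sketch below is a standalone, user-space model of the composition the new powerpc vm_get_page_prot() performs: base protection bits come from protection_map[], indexed by VM_READ/VM_WRITE/VM_EXEC/VM_SHARED, and the arch helper only ORs in extra bits (_PAGE_SAO when VM_SAO is set). The numeric constants and table values are illustrative placeholders, not real powerpc PTE bits, chosen only so the example builds and runs outside the kernel.

/*
 * Simplified, standalone model of the vm_get_page_prot() composition
 * added by this patch. Values are placeholders, not kernel bit layouts.
 */
#include <stdio.h>

#define VM_READ    0x1UL
#define VM_WRITE   0x2UL
#define VM_EXEC    0x4UL
#define VM_SHARED  0x8UL
#define VM_SAO     0x10UL   /* strong access ordering flag (placeholder value) */
#define _PAGE_SAO  0x100UL  /* placeholder for the powerpc SAO PTE bit */

/* 16-entry table indexed by the low four VM_* flags, as in the kernel. */
static const unsigned long protection_map[16] = {
	0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
	0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
};

/* Mirrors the PPC64 branch of the patch, minus the pkey handling. */
static unsigned long powerpc_vm_get_page_prot(unsigned long vm_flags)
{
	return (vm_flags & VM_SAO) ? _PAGE_SAO : 0;
}

/* Base bits from protection_map[] OR'ed with the arch-specific bits. */
static unsigned long vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)] |
	       powerpc_vm_get_page_prot(vm_flags);
}

int main(void)
{
	unsigned long prot = vm_get_page_prot(VM_READ | VM_WRITE | VM_SAO);

	printf("prot = %#lx\n", prot);  /* RW base bits plus _PAGE_SAO */
	return 0;
}

In the kernel the result is a pgprot_t rather than a raw unsigned long, and the typical caller is the generic mmap path (e.g. vma_set_page_prot() passing vma->vm_flags); the sketch only models the bit composition, not those call sites.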