This defines and exports a platform-specific custom vm_get_page_prot() by
selecting ARCH_HAS_VM_GET_PAGE_PROT. It also localizes
arch_vm_get_page_prot() as a static helper and moves it next to the new
vm_get_page_prot().
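
For context, once an architecture selects ARCH_HAS_VM_GET_PAGE_PROT, the
generic definition in mm/mmap.c is compiled out and the architecture must
provide its own. A minimal sketch of that generic fallback (as restructured
earlier in this series) looks roughly like:

	#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* base protection from the table, plus arch-specific bits */
		return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
				pgprot_val(arch_vm_get_page_prot(vm_flags)));
	}
	EXPORT_SYMBOL(vm_get_page_prot);
	#endif	/* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */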
Link: https://lkml.kernel.org/r/20220407103251.1209606-4-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
}
#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
- pteval_t prot = 0;
-
- if (vm_flags & VM_ARM64_BTI)
- prot |= PTE_GP;
-
- /*
- * There are two conditions required for returning a Normal Tagged
- * memory type: (1) the user requested it via PROT_MTE passed to
- * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
- * register (1) as VM_MTE in the vma->vm_flags and (2) as
- * VM_MTE_ALLOWED. Note that the latter can only be set during the
- * mmap() call since mprotect() does not accept MAP_* flags.
- * Checking for VM_MTE only is sufficient since arch_validate_flags()
- * does not permit (VM_MTE & !VM_MTE_ALLOWED).
- */
- if (vm_flags & VM_MTE)
- prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
-
- return __pgprot(prot);
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
static inline bool arch_validate_prot(unsigned long prot,
unsigned long addr __always_unused)
{
return 0;
}
arch_initcall(adjust_protection_map);
+
+static pgprot_t arm64_arch_vm_get_page_prot(unsigned long vm_flags)
+{
+ pteval_t prot = 0;
+
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
+
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+ return __pgprot(prot);
+}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+ pgprot_val(arm64_arch_vm_get_page_prot(vm_flags)));
+
+ return ret;
+}
+EXPORT_SYMBOL(vm_get_page_prot);
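
Keeping arm64_arch_vm_get_page_prot() static confines the BTI/MTE bit
selection to arch/arm64/mm/mmap.c, while core mm continues to derive
vma->vm_page_prot through the same entry point, roughly (illustrative,
not part of this patch):

	/* e.g. when a new mapping is set up in mm/mmap.c */
	vma->vm_page_prot = vm_get_page_prot(vm_flags);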