From 6fba12b74b00e88f53df9950d820afd23d2ade8a Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Fri, 16 Jun 2023 09:45:06 -0400
Subject: [PATCH] patch-arm64.diff

Signed-off-by: Liam R. Howlett
---
 arch/arm64/Kconfig    |  1 +
 arch/arm64/mm/fault.c | 43 ++++++++++---------------------------------
 2 files changed, 11 insertions(+), 33 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1201d25a8a4..9088ac36efa7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -225,6 +225,7 @@ config ARM64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cb21ccd7940d..96008b3647a4 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -483,15 +483,11 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 #define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
 #define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+				  struct vm_area_struct *vma, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
 				  struct pt_regs *regs)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
-
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
 	/*
 	 * Ok, we have a good vm_area for this memory access, so we can handle
 	 * it.
@@ -590,15 +586,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 #ifdef CONFIG_PER_VMA_LOCK
 	if (!(mm_flags & FAULT_FLAG_USER))
-		goto lock_mmap;
+		goto retry;
 
 	vma = lock_vma_under_rcu(mm, addr);
 	if (!vma)
-		goto lock_mmap;
+		goto retry;
 
 	if (!(vma->vm_flags & vm_flags)) {
 		vma_end_read(vma);
-		goto lock_mmap;
+		goto retry;
 	}
 	fault = handle_mm_fault(vma, addr & PAGE_MASK,
 				mm_flags | FAULT_FLAG_VMA_LOCK, regs);
@@ -616,33 +612,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		goto no_context;
 		return 0;
 	}
-lock_mmap:
 #endif /* CONFIG_PER_VMA_LOCK */
-	/*
-	 * As per x86, we may deadlock here.  However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
-			goto no_context;
+
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above mmap_read_trylock() might have succeeded in which
-		 * case, we'll have missed the might_sleep() from down_read().
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-			mmap_read_unlock(mm);
-			goto no_context;
-		}
-#endif
-	}
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma))
+		goto no_context;
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
-- 
2.49.0