patch-arm64.diff
author Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 16 Jun 2023 13:45:06 +0000 (09:45 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 16 Jun 2023 15:38:06 +0000 (11:38 -0400)
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
arch/arm64/Kconfig
arch/arm64/mm/fault.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1201d25a8a4ee75e6eeaddb46f626655883e6bf..9088ac36efa7720276fa2f66f824b7e13d8c1945 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -225,6 +225,7 @@ config ARM64
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
        select KASAN_VMALLOC if KASAN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
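
The new select enables the generic fault helper that this patch converts arm64 to. For reference, its contract, paraphrased from the upstream declaration in include/linux/mm.h (forward declarations added here so the snippet stands alone):

struct mm_struct;
struct pt_regs;
struct vm_area_struct;

/*
 * Built in mm/memory.c when CONFIG_LOCK_MM_AND_FIND_VMA=y: takes
 * mmap_lock for read (performing the trylock/deadlock checks that each
 * architecture used to open-code) and looks up the VMA covering addr,
 * expanding the stack if needed.  Returns the VMA with the lock still
 * held, or NULL with the lock already dropped.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
                                            unsigned long addr,
                                            struct pt_regs *regs);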
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cb21ccd7940d0e3cd9b496fa1c3110b3cfb1f75e..96008b3647a456595e5364ae81049da8e4490f31 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -483,15 +483,11 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 #define VM_FAULT_BADMAP                ((__force vm_fault_t)0x010000)
 #define VM_FAULT_BADACCESS     ((__force vm_fault_t)0x020000)
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+                                 struct vm_area_struct *vma, unsigned long addr,
                                  unsigned int mm_flags, unsigned long vm_flags,
                                  struct pt_regs *regs)
 {
-       struct vm_area_struct *vma = find_vma(mm, addr);
-
-       if (unlikely(!vma))
-               return VM_FAULT_BADMAP;
-
        /*
         * Ok, we have a good vm_area for this memory access, so we can handle
         * it.
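
With the hunk above, the VMA lookup (and the VM_FAULT_BADMAP result for a missing mapping) is hoisted out of __do_page_fault() into its caller; the caller now gets the VMA back from lock_mm_and_find_vma(), so the lookup can no longer live inside the shared inner function. A minimal standalone userspace model of the caller/callee split, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct vma_model { unsigned long start, end; bool writable; };

/* One hypothetical mapping standing in for the address space. */
static struct vma_model the_vma = { 0x1000, 0x2000, true };

static struct vma_model *find_vma_model(unsigned long addr)
{
        return (addr >= the_vma.start && addr < the_vma.end) ? &the_vma : NULL;
}

/* Callee, post-patch shape: the VMA arrives as a parameter, so the
 * body starts directly with the access checks. */
static int fault_inner(struct vma_model *vma, unsigned long addr, bool write)
{
        if (write && !vma->writable)
                return -1;              /* VM_FAULT_BADACCESS analogue */
        printf("handled fault at %#lx\n", addr);
        return 0;
}

/* Caller: resolves the VMA first, so "no mapping" (the old
 * VM_FAULT_BADMAP return) is decided here. */
static int fault_outer(unsigned long addr, bool write)
{
        struct vma_model *vma = find_vma_model(addr);

        if (!vma)
                return -2;              /* VM_FAULT_BADMAP analogue */
        return fault_inner(vma, addr, write);
}

int main(void)
{
        fault_outer(0x1800, true);      /* mapped: handled */
        fault_outer(0x3000, false);     /* unmapped: caller reports BADMAP */
        return 0;
}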
@@ -590,15 +586,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 #ifdef CONFIG_PER_VMA_LOCK
        if (!(mm_flags & FAULT_FLAG_USER))
-               goto lock_mmap;
+               goto retry;
 
        vma = lock_vma_under_rcu(mm, addr);
        if (!vma)
-               goto lock_mmap;
+               goto retry;
 
        if (!(vma->vm_flags & vm_flags)) {
                vma_end_read(vma);
-               goto lock_mmap;
+               goto retry;
        }
        fault = handle_mm_fault(vma, addr & PAGE_MASK,
                                mm_flags | FAULT_FLAG_VMA_LOCK, regs);
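
The three goto lock_mmap fallbacks retarget the retry: label introduced below, since the old lock_mmap block is deleted: any failure on the lockless per-VMA path now falls through to the full lock_mm_and_find_vma() path. A standalone pthread sketch of that try-fast-then-fall-back shape (hypothetical names; the kernel uses RCU plus a per-VMA lock, not two rwlocks):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t vma_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for lock_vma_under_rcu(): opportunistic, never blocks. */
static bool try_fast_path(void)
{
        return pthread_rwlock_tryrdlock(&vma_lock) == 0;
}

static void page_fault(void)
{
        if (try_fast_path()) {
                printf("handled under per-VMA lock\n");
                pthread_rwlock_unlock(&vma_lock);
                return;
        }
        /* The retry analogue: fall back to the coarse lock, as the
         * patched code falls through to lock_mm_and_find_vma(). */
        pthread_rwlock_rdlock(&mmap_lock);
        printf("handled under mmap_lock\n");
        pthread_rwlock_unlock(&mmap_lock);
}

int main(void)
{
        page_fault();
        return 0;
}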
@@ -616,33 +612,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
                        goto no_context;
                return 0;
        }
-lock_mmap:
 #endif /* CONFIG_PER_VMA_LOCK */
-       /*
-        * As per x86, we may deadlock here. However, since the kernel only
-        * validly references user space from well defined areas of the code,
-        * we can bug out early if this is from code which shouldn't.
-        */
-       if (!mmap_read_trylock(mm)) {
-               if (!user_mode(regs) && !search_exception_tables(regs->pc))
-                       goto no_context;
+
 retry:
-               mmap_read_lock(mm);
-       } else {
-               /*
-                * The above mmap_read_trylock() might have succeeded in which
-                * case, we'll have missed the might_sleep() from down_read().
-                */
-               might_sleep();
-#ifdef CONFIG_DEBUG_VM
-               if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-                       mmap_read_unlock(mm);
-                       goto no_context;
-               }
-#endif
-       }
+       vma = lock_mm_and_find_vma(mm, addr, regs);
+       if (unlikely(!vma))
+               goto no_context;
 
-       fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+       fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
 
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
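
Everything deleted above (the trylock, the deadlock comment, might_sleep() and the CONFIG_DEBUG_VM exception-table check) is what the generic helper now performs once for every architecture selecting LOCK_MM_AND_FIND_VMA. A standalone model of that take-mmap_lock-carefully pattern (hypothetical names; the real tests are user_mode(regs) and search_exception_tables(regs->pc)):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Trylock first; if that fails, only block when the fault came from
 * user mode or from kernel code with an exception-table fixup.  A
 * kernel fault with no fixup is a bug, so sleeping on mmap_lock (and
 * possibly deadlocking) is pointless - bail out so the caller can take
 * its no_context path.
 */
static bool lock_mm_carefully(bool user_mode, bool has_fixup)
{
        if (pthread_rwlock_tryrdlock(&mmap_lock) == 0)
                return true;
        if (!user_mode && !has_fixup)
                return false;           /* caller goes to no_context */
        pthread_rwlock_rdlock(&mmap_lock);
        return true;
}

int main(void)
{
        if (lock_mm_carefully(true, false)) {
                printf("mmap_lock taken, fault handling may proceed\n");
                pthread_rwlock_unlock(&mmap_lock);
        }
        return 0;
}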