www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
powerpc/mm: fix mmap_lock bad unlock
author Laurent Dufour <ldufour@linux.ibm.com>
Mon, 6 Mar 2023 15:42:44 +0000 (16:42 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 28 Mar 2023 23:24:56 +0000 (16:24 -0700)
When a page fault is attempted while holding the per-VMA lock, bad_access_pkey() and
bad_access() should not be called, because they assume the mmap_lock is
held.  In the case where a bad access is detected, fall back to the default
path, grabbing the mmap_lock to handle the fault and report the error.

Link: https://lkml.kernel.org/r/20230306154244.17560-1-ldufour@linux.ibm.com
Fixes: 169db3bb4609 ("powerc/mm: try VMA lock-based page fault handling first")
Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
Reported-by: Sachin Sant <sachinp@linux.ibm.com>
Link: https://lore.kernel.org/linux-mm/842502FB-F99C-417C-9648-A37D0ECDC9CE@linux.ibm.com
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/powerpc/mm/fault.c

index d0710ecc1fc761a000aecbe47a2736f950305a0e..531177a4ee0881b412c2b8ded4b62d9902107f1e 100644 (file)
@@ -484,17 +484,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 
        if (unlikely(access_pkey_error(is_write, is_exec,
                                       (error_code & DSISR_KEYFAULT), vma))) {
-               int rc = bad_access_pkey(regs, address, vma);
-
                vma_end_read(vma);
-               return rc;
+               goto lock_mmap;
        }
 
        if (unlikely(access_error(is_write, is_exec, vma))) {
-               int rc = bad_access(regs, address);
-
                vma_end_read(vma);
-               return rc;
+               goto lock_mmap;
        }
 
        fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);