www.infradead.org Git - users/dwmw2/linux.git/commitdiff
mm: don't skip arch_sync_kernel_mappings() in error paths
authorRyan Roberts <ryan.roberts@arm.com>
Wed, 26 Feb 2025 12:16:09 +0000 (12:16 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 6 Mar 2025 05:36:18 +0000 (21:36 -0800)
Fix callers that previously skipped calling arch_sync_kernel_mappings() if
an error occurred during a pgtable update.  The call is still required to
sync any pgtable updates that may have occurred prior to hitting the error
condition.

These are theoretical bugs discovered during code review.

Link: https://lkml.kernel.org/r/20250226121610.2401743-1-ryan.roberts@arm.com
Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified")
Fixes: 0c95cba49255 ("mm: apply_to_pte_range warn and fail if a large pte is encountered")
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c
mm/vmalloc.c

index b4d3d4893267c8c9007a2b6e5419136ece85b59a..55d0d49546273689d16d8a090ffc85bb65b0ff2a 100644 (file)
@@ -3051,8 +3051,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                next = pgd_addr_end(addr, end);
                if (pgd_none(*pgd) && !create)
                        continue;
-               if (WARN_ON_ONCE(pgd_leaf(*pgd)))
-                       return -EINVAL;
+               if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
+                       err = -EINVAL;
+                       break;
+               }
                if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
                        if (!create)
                                continue;
index a6e7acebe9adf5e6c8abd52dcf7d02a6a1bc3030..61981ee1c9d2f769d4a06ab542fc84334c1b0cbd 100644 (file)
@@ -586,13 +586,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
                        mask |= PGTBL_PGD_MODIFIED;
                err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
                if (err)
-                       return err;
+                       break;
        } while (pgd++, addr = next, addr != end);
 
        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, end);
 
-       return 0;
+       return err;
 }
 
 /*