mm: pagewalk: allow walk_page_range_novma() without mm
author Rolf Eike Beer <eb@emlix.com>
Mon, 22 Aug 2022 13:03:29 +0000 (15:03 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Aug 2022 05:03:25 +0000 (22:03 -0700)
Since e47690d756a7 ("x86: mm: avoid allocating struct mm_struct on the
stack") a pgd can be passed to walk_page_range_novma().  When a pgd is
supplied, no place in the pagewalk code uses walk.mm anymore, so allow
passing a NULL mm instead.  It is up to the caller to ensure proper
locking on the pgd in this case.

Link: https://lkml.kernel.org/r/5760214.MhkbZ0Pkbq@devpool047
Signed-off-by: Rolf Eike Beer <eb@emlix.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/pagewalk.c

index dc88bdbf787c94ed5fbf36af6fca651c89b092b5..4f07c120b5ccac8b7de23265903aaa1110b097a2 100644
@@ -506,6 +506,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
  * not backed by VMAs. Because 'unusual' entries may be walked this function
  * will also not lock the PTEs for the pte_entry() callback. This is useful for
  * walking the kernel page tables or page tables for firmware.
+ *
+ * Either mm or pgd may be NULL, but not both.
  */
 int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
                          unsigned long end, const struct mm_walk_ops *ops,
@@ -520,10 +522,11 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
                .no_vma         = true
        };
 
-       if (start >= end || !walk.mm)
+       if (start >= end || (!walk.mm && !walk.pgd))
                return -EINVAL;
 
-       mmap_assert_locked(walk.mm);
+       if (walk.mm)
+               mmap_assert_locked(walk.mm);
 
        return walk_pgd_range(start, end, &walk);
 }
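
With the patch applied, a caller that only holds a pgd can drive the walk
with a NULL mm.  The sketch below is illustrative only, not part of the
patch: note_pte() and dump_range() are hypothetical names, and it assumes
the caller keeps the page tables under the pgd stable for the duration of
the walk, since mmap_assert_locked() is skipped when mm is NULL.

#include <linux/pagewalk.h>
#include <linux/printk.h>

/* Illustrative callback: runs for each PTE.  In a no_vma walk the PTE
 * lock is not taken, as the comment in the hunk above notes. */
static int note_pte(pte_t *pte, unsigned long addr,
                    unsigned long next, struct mm_walk *walk)
{
        pr_info("pte at %#lx: %#llx\n",
                addr, (unsigned long long)pte_val(*pte));
        return 0;
}

static const struct mm_walk_ops note_ops = {
        .pte_entry = note_pte,
};

/* Hypothetical wrapper: mm is NULL, the pgd argument alone names the
 * page tables to walk; the caller must prevent concurrent changes. */
static int dump_range(pgd_t *pgd, unsigned long start, unsigned long end)
{
        return walk_page_range_novma(NULL, start, end, &note_ops, pgd, NULL);
}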