From 1d3a5ba1b1f74ec1e7ec25f234a9116546fae1bc Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Mon, 22 Aug 2022 15:03:29 +0200 Subject: [PATCH] mm: pagewalk: allow walk_page_range_novma() without mm Since e47690d756a7 ("x86: mm: avoid allocating struct mm_struct on the stack") a pgd can be passed to walk_page_range_novma(). In case it is set, no place in the pagewalk code uses walk.mm anymore, so permit passing a NULL mm instead. It is up to the caller to ensure proper locking on the pgd in this case. Link: https://lkml.kernel.org/r/5760214.MhkbZ0Pkbq@devpool047 Signed-off-by: Rolf Eike Beer Signed-off-by: Andrew Morton --- mm/pagewalk.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index e6d3ffe41e02..7629838eea13 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -506,6 +506,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, * not backed by VMAs. Because 'unusual' entries may be walked this function * will also not lock the PTEs for the pte_entry() callback. This is useful for * walking the kernel pages tables or page tables for firmware. + * + * Either mm or pgd may be NULL, but not both. */ int walk_page_range_novma(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, @@ -520,10 +522,11 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start, .no_vma = true }; - if (start >= end || !walk.mm) + if (start >= end || (!walk.mm && !walk.pgd)) return -EINVAL; - mmap_assert_locked(walk.mm); + if (walk.mm) + mmap_assert_locked(walk.mm); return walk_pgd_range(start, end, &walk); } -- 2.50.1