From 4b847d3e4f87c800ec0b0a58520b966ee2317bda Mon Sep 17 00:00:00 2001
From: Baolin Wang
Date: Wed, 17 Aug 2022 14:21:12 +0800
Subject: [PATCH] mm/damon: validate if the pmd entry is present before accessing

pmd_huge() is used to validate whether the pmd entry is mapped by a
huge page, but it also returns true for non-present (migration or
hwpoisoned) pmd entries on the arm64 and x86 architectures.  Thus we
should validate that the entry is present before making the pmd entry
old or reading its young state; otherwise we cannot get the correct
corresponding page.

Link: https://lkml.kernel.org/r/2838b6737bc259cf575ff11fd1c4b7fdb340fa73.1660717122.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang
Cc: SeongJae Park
Signed-off-by: Andrew Morton
---
 mm/damon/vaddr.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 3c7b9d6dca95d..1d16c6c796386 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -304,6 +304,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 
 	if (pmd_huge(*pmd)) {
 		ptl = pmd_lock(walk->mm, pmd);
+		if (!pmd_present(*pmd)) {
+			spin_unlock(ptl);
+			return 0;
+		}
+
 		if (pmd_huge(*pmd)) {
 			damon_pmdp_mkold(pmd, walk->mm, addr);
 			spin_unlock(ptl);
@@ -431,6 +436,11 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (pmd_huge(*pmd)) {
 		ptl = pmd_lock(walk->mm, pmd);
+		if (!pmd_present(*pmd)) {
+			spin_unlock(ptl);
+			return 0;
+		}
+
 		if (!pmd_huge(*pmd)) {
 			spin_unlock(ptl);
 			goto regular_page;
-- 
2.50.1
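
For reference, the ordering the patch establishes generalizes to any pagewalk
callback that handles huge PMDs: take the PMD lock, bail out if the entry is
not present (a migration or hwpoison swap entry can still satisfy pmd_huge()
on arm64/x86), and only then treat it as a mapped huge page.  Below is a
minimal sketch of that pattern using a hypothetical callback name and
simplified handling, not the DAMON functions themselves:

#include <linux/mm.h>		/* pmd_lock(), pmd_present() */
#include <linux/hugetlb.h>	/* pmd_huge() */
#include <linux/pagewalk.h>	/* struct mm_walk */

/* Hypothetical walker callback illustrating the check-before-use order. */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		/*
		 * Check under the lock: a non-present entry (migration or
		 * hwpoison) still looks "huge", but dereferencing it as a
		 * mapped page would yield the wrong page.
		 */
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}
		if (pmd_huge(*pmd)) {
			/* safe to reference the mapped huge page here */
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}
	/* fall through to regular PTE handling for non-huge entries */
	return 0;
}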