From 2616b370323a953c437ed2bf40a277e9deaa3709 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 9 May 2025 17:30:32 +0200 Subject: [PATCH 01/16] selftests/mm: add simple VM_PFNMAP tests based on mmap'ing /dev/mem Let's test some basic functionality using /dev/mem. These tests will implicitly cover some PAT (Page Attribute Handling) handling on x86. These tests will only run when /dev/mem access to the first two pages in physical address space is possible and allowed; otherwise, the tests are skipped. On current x86-64 with PAT inside a VM, all tests pass: TAP version 13 1..6 # Starting 6 tests from 1 test cases. # RUN pfnmap.madvise_disallowed ... # OK pfnmap.madvise_disallowed ok 1 pfnmap.madvise_disallowed # RUN pfnmap.munmap_split ... # OK pfnmap.munmap_split ok 2 pfnmap.munmap_split # RUN pfnmap.mremap_fixed ... # OK pfnmap.mremap_fixed ok 3 pfnmap.mremap_fixed # RUN pfnmap.mremap_shrink ... # OK pfnmap.mremap_shrink ok 4 pfnmap.mremap_shrink # RUN pfnmap.mremap_expand ... # OK pfnmap.mremap_expand ok 5 pfnmap.mremap_expand # RUN pfnmap.fork ... # OK pfnmap.fork ok 6 pfnmap.fork # PASSED: 6 / 6 tests passed. # Totals: pass:6 fail:0 xfail:0 xpass:0 skip:0 error:0 However, we are able to trigger: [ 27.888251] x86/PAT: pfnmap:1790 freeing invalid memtype [mem 0x00000000-0x00000fff] There are probably more things worth testing in the future, such as MAP_PRIVATE handling. But this set of tests is sufficient to cover most of the things we will rework regarding PAT handling. Link: https://lkml.kernel.org/r/20250509153033.952746-1-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Cc: Shuah Khan Cc: Ingo Molnar Cc: Peter Xu Cc: Dev Jain Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 1 + tools/testing/selftests/mm/Makefile | 1 + tools/testing/selftests/mm/pfnmap.c | 196 ++++++++++++++++++++++ tools/testing/selftests/mm/run_vmtests.sh | 4 + 4 files changed, 202 insertions(+) create mode 100644 tools/testing/selftests/mm/pfnmap.c diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index 91db34941a14..824266982aa3 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -20,6 +20,7 @@ mremap_test on-fault-limit transhuge-stress pagemap_ioctl +pfnmap *.tmp* protection_keys protection_keys_32 diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index ad4d6043a60f..ae6f994d3add 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -84,6 +84,7 @@ TEST_GEN_FILES += mremap_test TEST_GEN_FILES += mseal_test TEST_GEN_FILES += on-fault-limit TEST_GEN_FILES += pagemap_ioctl +TEST_GEN_FILES += pfnmap TEST_GEN_FILES += thuge-gen TEST_GEN_FILES += transhuge-stress TEST_GEN_FILES += uffd-stress diff --git a/tools/testing/selftests/mm/pfnmap.c b/tools/testing/selftests/mm/pfnmap.c new file mode 100644 index 000000000000..8a9d19b6020c --- /dev/null +++ b/tools/testing/selftests/mm/pfnmap.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Basic VM_PFNMAP tests relying on mmap() of '/dev/mem' + * + * Copyright 2025, Red Hat, Inc. 
+ * + * Author(s): David Hildenbrand + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../kselftest_harness.h" +#include "vm_util.h" + +static sigjmp_buf sigjmp_buf_env; + +static void signal_handler(int sig) +{ + siglongjmp(sigjmp_buf_env, -EFAULT); +} + +static int test_read_access(char *addr, size_t size, size_t pagesize) +{ + size_t offs; + int ret; + + if (signal(SIGSEGV, signal_handler) == SIG_ERR) + return -EINVAL; + + ret = sigsetjmp(sigjmp_buf_env, 1); + if (!ret) { + for (offs = 0; offs < size; offs += pagesize) + /* Force a read that the compiler cannot optimize out. */ + *((volatile char *)(addr + offs)); + } + if (signal(SIGSEGV, signal_handler) == SIG_ERR) + return -EINVAL; + + return ret; +} + +FIXTURE(pfnmap) +{ + size_t pagesize; + int dev_mem_fd; + char *addr1; + size_t size1; + char *addr2; + size_t size2; +}; + +FIXTURE_SETUP(pfnmap) +{ + self->pagesize = getpagesize(); + + self->dev_mem_fd = open("/dev/mem", O_RDONLY); + if (self->dev_mem_fd < 0) + SKIP(return, "Cannot open '/dev/mem'\n"); + + /* We'll require the first two pages throughout our tests ... */ + self->size1 = self->pagesize * 2; + self->addr1 = mmap(NULL, self->size1, PROT_READ, MAP_SHARED, + self->dev_mem_fd, 0); + if (self->addr1 == MAP_FAILED) + SKIP(return, "Cannot mmap '/dev/mem'\n"); + + /* ... and want to be able to read from them. */ + if (test_read_access(self->addr1, self->size1, self->pagesize)) + SKIP(return, "Cannot read-access mmap'ed '/dev/mem'\n"); + + self->size2 = 0; + self->addr2 = MAP_FAILED; +} + +FIXTURE_TEARDOWN(pfnmap) +{ + if (self->addr2 != MAP_FAILED) + munmap(self->addr2, self->size2); + if (self->addr1 != MAP_FAILED) + munmap(self->addr1, self->size1); + if (self->dev_mem_fd >= 0) + close(self->dev_mem_fd); +} + +TEST_F(pfnmap, madvise_disallowed) +{ + int advices[] = { + MADV_DONTNEED, + MADV_DONTNEED_LOCKED, + MADV_FREE, + MADV_WIPEONFORK, + MADV_COLD, + MADV_PAGEOUT, + MADV_POPULATE_READ, + MADV_POPULATE_WRITE, + }; + int i; + + /* All these advices must be rejected. */ + for (i = 0; i < ARRAY_SIZE(advices); i++) { + EXPECT_LT(madvise(self->addr1, self->pagesize, advices[i]), 0); + EXPECT_EQ(errno, EINVAL); + } +} + +TEST_F(pfnmap, munmap_split) +{ + /* + * Unmap the first page. This munmap() call is not really expected to + * fail, but we might be able to trigger other internal issues. + */ + ASSERT_EQ(munmap(self->addr1, self->pagesize), 0); + + /* + * Remap the first page while the second page is still mapped. This + * makes sure that any PAT tracking on x86 will allow for mmap()'ing + * a page again while some parts of the first mmap() are still + * around. + */ + self->size2 = self->pagesize; + self->addr2 = mmap(NULL, self->pagesize, PROT_READ, MAP_SHARED, + self->dev_mem_fd, 0); + ASSERT_NE(self->addr2, MAP_FAILED); +} + +TEST_F(pfnmap, mremap_fixed) +{ + char *ret; + + /* Reserve a destination area. */ + self->size2 = self->size1; + self->addr2 = mmap(NULL, self->size2, PROT_READ, MAP_ANON | MAP_PRIVATE, + -1, 0); + ASSERT_NE(self->addr2, MAP_FAILED); + + /* mremap() over our destination. */ + ret = mremap(self->addr1, self->size1, self->size2, + MREMAP_FIXED | MREMAP_MAYMOVE, self->addr2); + ASSERT_NE(ret, MAP_FAILED); +} + +TEST_F(pfnmap, mremap_shrink) +{ + char *ret; + + /* Shrinking is expected to work. 
*/ + ret = mremap(self->addr1, self->size1, self->size1 - self->pagesize, 0); + ASSERT_NE(ret, MAP_FAILED); +} + +TEST_F(pfnmap, mremap_expand) +{ + /* + * Growing is not expected to work, and getting it right would + * be challenging. So this test primarily serves as an early warning + * that something that probably should never work suddenly works. + */ + self->size2 = self->size1 + self->pagesize; + self->addr2 = mremap(self->addr1, self->size1, self->size2, MREMAP_MAYMOVE); + ASSERT_EQ(self->addr2, MAP_FAILED); +} + +TEST_F(pfnmap, fork) +{ + pid_t pid; + int ret; + + /* fork() a child and test if the child can access the pages. */ + pid = fork(); + ASSERT_GE(pid, 0); + + if (!pid) { + EXPECT_EQ(test_read_access(self->addr1, self->size1, + self->pagesize), 0); + exit(0); + } + + wait(&ret); + if (WIFEXITED(ret)) + ret = WEXITSTATUS(ret); + else + ret = -EINVAL; + ASSERT_EQ(ret, 0); +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 188b125bf1f6..dddd1dd8af14 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -63,6 +63,8 @@ separated by spaces: test soft dirty page bit semantics - pagemap test pagemap_scan IOCTL +- pfnmap + tests for VM_PFNMAP handling - cow test copy-on-write semantics - thp @@ -472,6 +474,8 @@ fi CATEGORY="pagemap" run_test ./pagemap_ioctl +CATEGORY="pfnmap" run_test ./pfnmap + # COW tests CATEGORY="cow" run_test ./cow -- 2.51.0 From 83b6d498d027002e79c2ce40b5729137500c3170 Mon Sep 17 00:00:00 2001 From: Zhongkun He Date: Fri, 9 May 2025 16:35:28 +0800 Subject: [PATCH 02/16] mm: cma: set early_pfn and bitmap as a union in cma_memrange Since early_pfn and bitmap are never used at the same time, they can be defined as a union to reduce the size of the data structure. This change can save 8 * u64 entries per CMA. 
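For illustration only (a user-space sketch with simplified stand-in structs, not the kernel's struct cma_memrange): early_pfn is only needed before a range is activated and bitmap only afterwards, so overlaying them drops one word per range; with up to 8 ranges per CMA area that is the "8 * u64" quoted above.

  #include <stdio.h>

  /* Simplified stand-ins for the range descriptor before and after the change. */
  struct range_before {
          unsigned long base_pfn;
          unsigned long early_pfn;        /* used only until activation */
          unsigned long count;
          unsigned long *bitmap;          /* used only after activation */
  };

  struct range_after {
          unsigned long base_pfn;
          unsigned long count;
          union {                         /* the two fields never coexist */
                  unsigned long early_pfn;
                  unsigned long *bitmap;
          };
  };

  int main(void)
  {
          /* On LP64 this prints "32 24": one unsigned long saved per range. */
          printf("%zu %zu\n", sizeof(struct range_before),
                 sizeof(struct range_after));
          return 0;
  }
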
Link: https://lkml.kernel.org/r/20250509083528.1360952-1-hezhongkun.hzk@bytedance.com Signed-off-by: Zhongkun He Signed-off-by: Andrew Morton --- mm/cma.c | 11 ++++++----- mm/cma.h | 6 ++++-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 15632939f20a..ec4b9a401b7d 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -143,13 +143,14 @@ bool cma_validate_zones(struct cma *cma) static void __init cma_activate_area(struct cma *cma) { - unsigned long pfn, end_pfn; + unsigned long pfn, end_pfn, early_pfn[CMA_MAX_RANGES]; int allocrange, r; struct cma_memrange *cmr; unsigned long bitmap_count, count; for (allocrange = 0; allocrange < cma->nranges; allocrange++) { cmr = &cma->ranges[allocrange]; + early_pfn[allocrange] = cmr->early_pfn; cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr), GFP_KERNEL); if (!cmr->bitmap) @@ -161,13 +162,13 @@ static void __init cma_activate_area(struct cma *cma) for (r = 0; r < cma->nranges; r++) { cmr = &cma->ranges[r]; - if (cmr->early_pfn != cmr->base_pfn) { - count = cmr->early_pfn - cmr->base_pfn; + if (early_pfn[r] != cmr->base_pfn) { + count = early_pfn[r] - cmr->base_pfn; bitmap_count = cma_bitmap_pages_to_bits(cma, count); bitmap_set(cmr->bitmap, 0, bitmap_count); } - for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count; + for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count; pfn += pageblock_nr_pages) init_cma_reserved_pageblock(pfn_to_page(pfn)); } @@ -193,7 +194,7 @@ cleanup: for (r = 0; r < allocrange; r++) { cmr = &cma->ranges[r]; end_pfn = cmr->base_pfn + cmr->count; - for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++) + for (pfn = early_pfn[r]; pfn < end_pfn; pfn++) free_reserved_page(pfn_to_page(pfn)); } } diff --git a/mm/cma.h b/mm/cma.h index 41a3ab0ec3de..c70180c36559 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -25,9 +25,11 @@ struct cma_kobject { */ struct cma_memrange { unsigned long base_pfn; - unsigned long early_pfn; unsigned long count; - unsigned long *bitmap; + union { + unsigned long early_pfn; + unsigned long *bitmap; + }; #ifdef CONFIG_CMA_DEBUGFS struct debugfs_u32_array dfs_bitmap; #endif -- 2.51.0 From 4df65651f7075481e44c03bb39855a38783d87ac Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 9 May 2025 08:45:21 +0800 Subject: [PATCH 03/16] mm: mincore: use pte_batch_hint() to batch process large folios When I tested the mincore() syscall, I observed that it takes longer with 64K mTHP enabled on my Arm64 server. The reason is the mincore_pte_range() still checks each PTE individually, even when the PTEs are contiguous, which is not efficient. Thus we can use pte_batch_hint() to get the batch number of the present contiguous PTEs, which can improve the performance. I tested the mincore() syscall with 1G anonymous memory populated with 64K mTHP, and observed an obvious performance improvement: w/o patch w/ patch changes 6022us 549us +91% Moreover, I also tested mincore() with disabling mTHP/THP, and did not see any obvious regression for base pages. 
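For reference (not part of the patch): pte_batch_hint() only hints at how many PTEs starting at @ptep are known to share the same contiguity, which is what bounds 'step' above. The generic fallback reports a batch of one, which is why base pages keep the old per-PTE behaviour, while arm64 reports the remaining entries of a contPTE block. Roughly, as a sketch of the generic helper's shape rather than a verbatim copy:

  static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
  {
          /* No architecture-specific batching information available. */
          return 1;
  }
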
Link: https://lkml.kernel.org/r/99cb00ee626ceb6e788102ca36821815cd832237.1746697240.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Barry Song Reviewed-by: Dev Jain Acked-by: David Hildenbrand Cc: Dev Jain Cc: Ryan Roberts Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/mincore.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/mm/mincore.c b/mm/mincore.c index 832f29f46767..42d6c9c8da86 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -21,6 +21,7 @@ #include #include "swap.h" +#include "internal.h" static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -105,6 +106,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pte_t *ptep; unsigned char *vec = walk->private; int nr = (end - addr) >> PAGE_SHIFT; + int step, i; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { @@ -118,16 +120,26 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, walk->action = ACTION_AGAIN; return 0; } - for (; addr != end; ptep++, addr += PAGE_SIZE) { + for (; addr != end; ptep += step, addr += step * PAGE_SIZE) { pte_t pte = ptep_get(ptep); + step = 1; /* We need to do cache lookup too for pte markers */ if (pte_none_mostly(pte)) __mincore_unmapped_range(addr, addr + PAGE_SIZE, vma, vec); - else if (pte_present(pte)) - *vec = 1; - else { /* pte is a swap entry */ + else if (pte_present(pte)) { + unsigned int batch = pte_batch_hint(ptep, pte); + + if (batch > 1) { + unsigned int max_nr = (end - addr) >> PAGE_SHIFT; + + step = min_t(unsigned int, batch, max_nr); + } + + for (i = 0; i < step; i++) + vec[i] = 1; + } else { /* pte is a swap entry */ swp_entry_t entry = pte_to_swp_entry(pte); if (non_swap_entry(entry)) { @@ -146,7 +158,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, #endif } } - vec++; + vec += step; } pte_unmap_unlock(ptep - 1, ptl); out: -- 2.51.0 From ed1a7814036c60ced6198548558342ef75af3ad8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:14 +0200 Subject: [PATCH 04/16] x86/mm/pat: factor out setting cachemode into pgprot_set_cachemode() VM_PAT annoyed me too much and wasted too much of my time, let's clean PAT handling up and remove VM_PAT. This should sort out various issues with VM_PAT we discovered recently, and will hopefully make the whole code more stable and easier to maintain. In essence: we stop letting PAT mode mess with VMAs and instead lift what to track/untrack to the MM core. We remember per VMA which pfn range we tracked in a new struct we attach to a VMA (we have space without exceeding 192 bytes), use a kref to share it among VMAs during split/mremap/fork, and automatically untrack once the kref drops to 0. This implies that we'll keep tracking a full pfn range even after partially unmapping it, until fully unmapping it; but as that case was mostly broken before, this at least makes it work in a way that is least intrusive to VMA handling. Shrinking with mremap() used to work in a hacky way, now we'll similarly keep the original pfn range tacked even after this form of partial unmap. Does anybody care about that? Unlikely. If we run into issues, we could likely handled that (adjust the tracking) when our kref drops to 1 while freeing a VMA. But it adds more complexity, so avoid that for now. Briefly tested with the new pfnmap selftests [1]. This patch (of 11): Let's factor it out to make the code easier to grasp. 
Drop one comment where it is now rather obvious what is happening. Use it also in pgprot_writecombine()/pgprot_writethrough() where clearing the old cachemode might not be required, but given that we are already doing a function call, no need to care about this micro-optimization. Link: https://lkml.kernel.org/r/20250512123424.637989-1-david@redhat.com Link: https://lkml.kernel.org/r/20250512123424.637989-2-david@redhat.com Link: https://lkml.kernel.org/r/20250509153033.952746-1-david@redhat.com [1] Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 72d8cbc61158..edec5859651d 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -800,6 +800,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) } #endif /* CONFIG_STRICT_DEVMEM */ +static inline void pgprot_set_cachemode(pgprot_t *prot, enum page_cache_mode pcm) +{ + *prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) | + cachemode2protval(pcm)); +} + int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { @@ -811,8 +817,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, if (file->f_flags & O_DSYNC) pcm = _PAGE_CACHE_MODE_UC_MINUS; - *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | - cachemode2protval(pcm)); + pgprot_set_cachemode(vma_prot, pcm); return 1; } @@ -880,9 +885,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), cattr_name(pcm)); - *vma_prot = __pgprot((pgprot_val(*vma_prot) & - (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); + pgprot_set_cachemode(vma_prot, pcm); } return 0; } @@ -907,9 +910,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, * We allow returning different type than the one requested in * non strict case. 
*/ - *vma_prot = __pgprot((pgprot_val(*vma_prot) & - (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); + pgprot_set_cachemode(vma_prot, pcm); } if (memtype_kernel_map_sync(paddr, size, pcm) < 0) { @@ -1060,9 +1061,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, return -EINVAL; } - *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); - + pgprot_set_cachemode(prot, pcm); return 0; } @@ -1073,10 +1072,8 @@ void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) if (!pat_enabled()) return; - /* Set prot based on lookup */ pcm = lookup_memtype(pfn_t_to_phys(pfn)); - *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | - cachemode2protval(pcm)); + pgprot_set_cachemode(prot, pcm); } /* @@ -1115,15 +1112,15 @@ void untrack_pfn_clear(struct vm_area_struct *vma) pgprot_t pgprot_writecombine(pgprot_t prot) { - return __pgprot(pgprot_val(prot) | - cachemode2protval(_PAGE_CACHE_MODE_WC)); + pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WC); + return prot; } EXPORT_SYMBOL_GPL(pgprot_writecombine); pgprot_t pgprot_writethrough(pgprot_t prot) { - return __pgprot(pgprot_val(prot) | - cachemode2protval(_PAGE_CACHE_MODE_WT)); + pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WT); + return prot; } EXPORT_SYMBOL_GPL(pgprot_writethrough); -- 2.51.0 From e1e1a3ae7f9f0cb06e80af0f24927be63149d081 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:15 +0200 Subject: [PATCH 05/16] mm: convert track_pfn_insert() to pfnmap_setup_cachemode*() ... by factoring it out from track_pfn_remap() into pfnmap_setup_cachemode() and provide pfnmap_setup_cachemode_pfn() as a replacement for track_pfn_insert(). For PMDs/PUDs, we keep checking a single pfn only. Add some documentation, and also document why it is valid to not check the whole pfn range. We'll reuse pfnmap_setup_cachemode() from core MM next. Link: https://lkml.kernel.org/r/20250512123424.637989-3-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. 
Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype.c | 24 ++++++------------ include/linux/pgtable.h | 52 +++++++++++++++++++++++++++++++++------ mm/huge_memory.c | 5 ++-- mm/memory.c | 4 +-- 4 files changed, 57 insertions(+), 28 deletions(-) diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index edec5859651d..fa78facc6f63 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -1031,7 +1031,6 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; - enum page_cache_mode pcm; /* reserve the whole chunk starting from paddr */ if (!vma || (addr == vma->vm_start @@ -1044,13 +1043,17 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, return ret; } + return pfnmap_setup_cachemode(pfn, size, prot); +} + +int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot) +{ + resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; + enum page_cache_mode pcm; + if (!pat_enabled()) return 0; - /* - * For anything smaller than the vma size we set prot based on the - * lookup. - */ pcm = lookup_memtype(paddr); /* Check memtype for the remaining pages */ @@ -1065,17 +1068,6 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, return 0; } -void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) -{ - enum page_cache_mode pcm; - - if (!pat_enabled()) - return; - - pcm = lookup_memtype(pfn_t_to_phys(pfn)); - pgprot_set_cachemode(prot, pcm); -} - /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index f1e890b60460..be1745839871 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1496,13 +1496,10 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, return 0; } -/* - * track_pfn_insert is called when a _new_ single pfn is established - * by vmf_insert_pfn(). - */ -static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, - pfn_t pfn) +static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, + pgprot_t *prot) { + return 0; } /* @@ -1552,8 +1549,32 @@ static inline void untrack_pfn_clear(struct vm_area_struct *vma) extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size); -extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, - pfn_t pfn); + +/** + * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range + * @pfn: the start of the pfn range + * @size: the size of the pfn range in bytes + * @prot: the pgprot to modify + * + * Lookup the cachemode for the pfn range starting at @pfn with the size + * @size and store it in @prot, leaving other data in @prot unchanged. + * + * This allows for a hardware implementation to have fine-grained control of + * memory cache behavior at page level granularity. Without a hardware + * implementation, this function does nothing. + * + * Currently there is only one implementation for this - x86 Page Attribute + * Table (PAT). 
See Documentation/arch/x86/pat.rst for more details. + * + * This function can fail if the pfn range spans pfns that require differing + * cachemodes. If the pfn range was previously verified to have a single + * cachemode, it is sufficient to query only a single pfn. The assumption is + * that this is the case for drivers using the vmf_insert_pfn*() interface. + * + * Returns 0 on success and -EINVAL on error. + */ +int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, + pgprot_t *prot); extern int track_pfn_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long *pfn); extern void untrack_pfn_copy(struct vm_area_struct *dst_vma, @@ -1563,6 +1584,21 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, extern void untrack_pfn_clear(struct vm_area_struct *vma); #endif +/** + * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn + * @pfn: the pfn + * @prot: the pgprot to modify + * + * Lookup the cachemode for @pfn and store it in @prot, leaving other + * data in @prot unchanged. + * + * See pfnmap_setup_cachemode() for details. + */ +static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot) +{ + pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot); +} + #ifdef CONFIG_MMU #ifdef __HAVE_COLOR_ZERO_PAGE static inline int is_zero_pfn(unsigned long pfn) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2780a12b25f0..d3e66136e41a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1455,7 +1455,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) return VM_FAULT_OOM; } - track_pfn_insert(vma, &pgprot, pfn); + pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot); + ptl = pmd_lock(vma->vm_mm, vmf->pmd); error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); @@ -1577,7 +1578,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; - track_pfn_insert(vma, &pgprot, pfn); + pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot); ptl = pud_lock(vma->vm_mm, vmf->pud); insert_pfn_pud(vma, addr, vmf->pud, pfn, write); diff --git a/mm/memory.c b/mm/memory.c index 99af83434e7c..064fc55d8eab 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2564,7 +2564,7 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, if (!pfn_modify_allowed(pfn, pgprot)) return VM_FAULT_SIGBUS; - track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); + pfnmap_setup_cachemode_pfn(pfn, &pgprot); return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, false); @@ -2627,7 +2627,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; - track_pfn_insert(vma, &pgprot, pfn); + pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot); if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) return VM_FAULT_SIGBUS; -- 2.51.0 From db44863a4d9df3604c4ff76507bb2056b6392e58 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:16 +0200 Subject: [PATCH 06/16] mm: introduce pfnmap_track() and pfnmap_untrack() and use them for memremap Let's provide variants of track_pfn_remap() and untrack_pfn() that won't mess with VMAs, and replace the usage in mm/memremap.c. Add some documentation. 
Link: https://lkml.kernel.org/r/20250512123424.637989-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype.c | 14 ++++++++++++++ include/linux/pgtable.h | 39 +++++++++++++++++++++++++++++++++++++++ mm/memremap.c | 8 ++++---- 3 files changed, 57 insertions(+), 4 deletions(-) diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index fa78facc6f63..1ec8af6cad6b 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -1068,6 +1068,20 @@ int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot return 0; } +int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot) +{ + const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; + + return reserve_pfn_range(paddr, size, prot, 0); +} + +void pfnmap_untrack(unsigned long pfn, unsigned long size) +{ + const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; + + free_pfn_range(paddr, size); +} + /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index be1745839871..90f72cd35839 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1502,6 +1502,16 @@ static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, return 0; } +static inline int pfnmap_track(unsigned long pfn, unsigned long size, + pgprot_t *prot) +{ + return 0; +} + +static inline void pfnmap_untrack(unsigned long pfn, unsigned long size) +{ +} + /* * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page * tables copied during copy_page_range(). Will store the pfn to be @@ -1575,6 +1585,35 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, */ int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot); + +/** + * pfnmap_track - track a pfn range + * @pfn: the start of the pfn range + * @size: the size of the pfn range in bytes + * @prot: the pgprot to track + * + * Requested the pfn range to be 'tracked' by a hardware implementation and + * setup the cachemode in @prot similar to pfnmap_setup_cachemode(). + * + * This allows for fine-grained control of memory cache behaviour at page + * level granularity. Tracking memory this way is persisted across VMA splits + * (VMA merging does not apply for VM_PFNMAP). + * + * Currently, there is only one implementation for this - x86 Page Attribute + * Table (PAT). See Documentation/arch/x86/pat.rst for more details. + * + * Returns 0 on success and -EINVAL on error. + */ +int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot); + +/** + * pfnmap_untrack - untrack a pfn range + * @pfn: the start of the pfn range + * @size: the size of the pfn range in bytes + * + * Untrack a pfn range previously tracked through pfnmap_track(). 
+ */ +void pfnmap_untrack(unsigned long pfn, unsigned long size); extern int track_pfn_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long *pfn); extern void untrack_pfn_copy(struct vm_area_struct *dst_vma, diff --git a/mm/memremap.c b/mm/memremap.c index 2aebc1b192da..c417c843e9b1 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -130,7 +130,7 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) } mem_hotplug_done(); - untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true); + pfnmap_untrack(PHYS_PFN(range->start), range_len(range)); pgmap_array_delete(range); } @@ -211,8 +211,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, if (nid < 0) nid = numa_mem_id(); - error = track_pfn_remap(NULL, ¶ms->pgprot, PHYS_PFN(range->start), 0, - range_len(range)); + error = pfnmap_track(PHYS_PFN(range->start), range_len(range), + ¶ms->pgprot); if (error) goto err_pfn_remap; @@ -277,7 +277,7 @@ err_add_memory: if (!is_private) kasan_remove_zero_shadow(__va(range->start), range_len(range)); err_kasan: - untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true); + pfnmap_untrack(PHYS_PFN(range->start), range_len(range)); err_pfn_remap: pgmap_array_delete(range); return error; -- 2.51.0 From f8e97613fed25758ddf52159b87e1c66e619a23a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:17 +0200 Subject: [PATCH 07/16] mm: convert VM_PFNMAP tracking to pfnmap_track() + pfnmap_untrack() Let's use our new interface. In remap_pfn_range(), we'll now decide whether we have to track (full VMA covered) or only lookup the cachemode (partial VMA covered). Remember what we have to untrack by linking it from the VMA. When duplicating VMAs (e.g., splitting, mremap, fork), we'll handle it similar to anon VMA names, and use a kref to share the tracking. Once the last VMA un-refs our tracking data, we'll do the untracking, which simplifies things a lot and should sort our various issues we saw recently, for example, when partially unmapping/zapping a tracked VMA. This change implies that we'll keep tracking the original PFN range even after splitting + partially unmapping it: not too bad, because it was not working reliably before. The only thing that kind-of worked before was shrinking such a mapping using mremap(): we managed to adjust the reservation in a hacky way, now we won't adjust the reservation but leave it around until all involved VMAs are gone. If that ever turns out to be an issue, we could hook into VM splitting code and split the tracking; however, that adds complexity that might not be required, so we'll keep it simple for now. Link: https://lkml.kernel.org/r/20250512123424.637989-5-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. 
Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mm_inline.h | 2 + include/linux/mm_types.h | 11 ++++++ mm/memory.c | 82 +++++++++++++++++++++++++++++++-------- mm/mmap.c | 5 --- mm/mremap.c | 4 -- mm/vma_init.c | 50 ++++++++++++++++++++++++ 6 files changed, 129 insertions(+), 25 deletions(-) diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index f9157a0c42a5..89b518ff097e 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -447,6 +447,8 @@ static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, #endif /* CONFIG_ANON_VMA_NAME */ +void pfnmap_track_ctx_release(struct kref *ref); + static inline void init_tlb_flush_pending(struct mm_struct *mm) { atomic_set(&mm->tlb_flush_pending, 0); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 15808cad2bc1..3e934dc6057c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -763,6 +763,14 @@ struct vma_numab_state { int prev_scan_seq; }; +#ifdef __HAVE_PFNMAP_TRACKING +struct pfnmap_track_ctx { + struct kref kref; + unsigned long pfn; + unsigned long size; /* in bytes */ +}; +#endif + /* * Describes a VMA that is about to be mmap()'ed. Drivers may choose to * manipulate mutable fields which will cause those fields to be updated in the @@ -900,6 +908,9 @@ struct vm_area_struct { struct anon_vma_name *anon_name; #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef __HAVE_PFNMAP_TRACKING + struct pfnmap_track_ctx *pfnmap_track_ctx; +#endif } __randomize_layout; #ifdef CONFIG_NUMA diff --git a/mm/memory.c b/mm/memory.c index 064fc55d8eab..4cf4adb0de26 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1371,7 +1371,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; struct mmu_notifier_range range; - unsigned long next, pfn = 0; + unsigned long next; bool is_cow; int ret; @@ -1381,12 +1381,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) if (is_vm_hugetlb_page(src_vma)) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma); - if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { - ret = track_pfn_copy(dst_vma, src_vma, &pfn); - if (ret) - return ret; - } - /* * We need to invalidate the secondary MMU mappings only when * there could be a permission downgrade on the ptes of the @@ -1428,8 +1422,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) raw_write_seqcount_end(&src_mm->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } - if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP)) - untrack_pfn_copy(dst_vma, pfn); return ret; } @@ -1924,9 +1916,6 @@ static void unmap_single_vma(struct mmu_gather *tlb, if (vma->vm_file) uprobe_munmap(vma, start, end); - if (unlikely(vma->vm_flags & VM_PFNMAP)) - untrack_pfn(vma, 0, 0, mm_wr_locked); - if (start != end) { if (unlikely(is_vm_hugetlb_page(vma))) { /* @@ -2872,6 +2861,36 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, return error; } +#ifdef __HAVE_PFNMAP_TRACKING +static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn, + unsigned long size, pgprot_t *prot) +{ + struct pfnmap_track_ctx *ctx; + + if (pfnmap_track(pfn, size, 
prot)) + return ERR_PTR(-EINVAL); + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (unlikely(!ctx)) { + pfnmap_untrack(pfn, size); + return ERR_PTR(-ENOMEM); + } + + ctx->pfn = pfn; + ctx->size = size; + kref_init(&ctx->kref); + return ctx; +} + +void pfnmap_track_ctx_release(struct kref *ref) +{ + struct pfnmap_track_ctx *ctx = container_of(ref, struct pfnmap_track_ctx, kref); + + pfnmap_untrack(ctx->pfn, ctx->size); + kfree(ctx); +} +#endif /* __HAVE_PFNMAP_TRACKING */ + /** * remap_pfn_range - remap kernel memory to userspace * @vma: user vma to map to @@ -2884,20 +2903,51 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, * * Return: %0 on success, negative error code otherwise. */ +#ifdef __HAVE_PFNMAP_TRACKING int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { + struct pfnmap_track_ctx *ctx = NULL; int err; - err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); - if (err) + size = PAGE_ALIGN(size); + + /* + * If we cover the full VMA, we'll perform actual tracking, and + * remember to untrack when the last reference to our tracking + * context from a VMA goes away. We'll keep tracking the whole pfn + * range even during VMA splits and partial unmapping. + * + * If we only cover parts of the VMA, we'll only setup the cachemode + * in the pgprot for the pfn range. + */ + if (addr == vma->vm_start && addr + size == vma->vm_end) { + if (vma->pfnmap_track_ctx) + return -EINVAL; + ctx = pfnmap_track_ctx_alloc(pfn, size, &prot); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + } else if (pfnmap_setup_cachemode(pfn, size, &prot)) { return -EINVAL; + } err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); - if (err) - untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); + if (ctx) { + if (err) + kref_put(&ctx->kref, pfnmap_track_ctx_release); + else + vma->pfnmap_track_ctx = ctx; + } return err; } + +#else +int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + return remap_pfn_range_notrack(vma, addr, pfn, size, prot); +} +#endif EXPORT_SYMBOL(remap_pfn_range); /** diff --git a/mm/mmap.c b/mm/mmap.c index 50f902c08341..09c563c95112 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1784,11 +1784,6 @@ __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) tmp = vm_area_dup(mpnt); if (!tmp) goto fail_nomem; - - /* track_pfn_copy() will later take care of copying internal state. 
*/ - if (unlikely(tmp->vm_flags & VM_PFNMAP)) - untrack_pfn_clear(tmp); - retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; diff --git a/mm/mremap.c b/mm/mremap.c index 7db9da609c84..6e78e02f74bd 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -1191,10 +1191,6 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm, if (is_vm_hugetlb_page(vma)) clear_vma_resv_huge_pages(vma); - /* Tell pfnmap has moved from this vma */ - if (unlikely(vma->vm_flags & VM_PFNMAP)) - untrack_pfn_clear(vma); - *new_vma_ptr = new_vma; return err; } diff --git a/mm/vma_init.c b/mm/vma_init.c index 967ca8517986..8e53c7943561 100644 --- a/mm/vma_init.c +++ b/mm/vma_init.c @@ -71,7 +71,51 @@ static void vm_area_init_from(const struct vm_area_struct *src, #ifdef CONFIG_NUMA dest->vm_policy = src->vm_policy; #endif +#ifdef __HAVE_PFNMAP_TRACKING + dest->pfnmap_track_ctx = NULL; +#endif +} + +#ifdef __HAVE_PFNMAP_TRACKING +static inline int vma_pfnmap_track_ctx_dup(struct vm_area_struct *orig, + struct vm_area_struct *new) +{ + struct pfnmap_track_ctx *ctx = orig->pfnmap_track_ctx; + + if (likely(!ctx)) + return 0; + + /* + * We don't expect to ever hit this. If ever required, we would have + * to duplicate the tracking. + */ + if (unlikely(kref_read(&ctx->kref) >= REFCOUNT_MAX)) + return -ENOMEM; + kref_get(&ctx->kref); + new->pfnmap_track_ctx = ctx; + return 0; +} + +static inline void vma_pfnmap_track_ctx_release(struct vm_area_struct *vma) +{ + struct pfnmap_track_ctx *ctx = vma->pfnmap_track_ctx; + + if (likely(!ctx)) + return; + + kref_put(&ctx->kref, pfnmap_track_ctx_release); + vma->pfnmap_track_ctx = NULL; +} +#else +static inline int vma_pfnmap_track_ctx_dup(struct vm_area_struct *orig, + struct vm_area_struct *new) +{ + return 0; } +static inline void vma_pfnmap_track_ctx_release(struct vm_area_struct *vma) +{ +} +#endif struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { @@ -83,6 +127,11 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); ASSERT_EXCLUSIVE_WRITER(orig->vm_file); vm_area_init_from(orig, new); + + if (vma_pfnmap_track_ctx_dup(orig, new)) { + kmem_cache_free(vm_area_cachep, new); + return NULL; + } vma_lock_init(new, true); INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); @@ -97,5 +146,6 @@ void vm_area_free(struct vm_area_struct *vma) vma_assert_detached(vma); vma_numab_state_free(vma); free_anon_vma_name(vma); + vma_pfnmap_track_ctx_release(vma); kmem_cache_free(vm_area_cachep, vma); } -- 2.51.0 From 7bd7d74ec01954fde9eb65b065eb55bcda4f86e2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:18 +0200 Subject: [PATCH 08/16] x86/mm/pat: remove old pfnmap tracking interface We can now get rid of the old interface along with get_pat_info() and follow_phys(). Link: https://lkml.kernel.org/r/20250512123424.637989-6-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. 
Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype.c | 147 -------------------------------------- include/linux/pgtable.h | 66 ----------------- 2 files changed, 213 deletions(-) diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 1ec8af6cad6b..c88d1cbdc1de 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -933,119 +933,6 @@ static void free_pfn_range(u64 paddr, unsigned long size) memtype_free(paddr, paddr + size); } -static int follow_phys(struct vm_area_struct *vma, unsigned long *prot, - resource_size_t *phys) -{ - struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start }; - - if (follow_pfnmap_start(&args)) - return -EINVAL; - - /* Never return PFNs of anon folios in COW mappings. */ - if (!args.special) { - follow_pfnmap_end(&args); - return -EINVAL; - } - - *prot = pgprot_val(args.pgprot); - *phys = (resource_size_t)args.pfn << PAGE_SHIFT; - follow_pfnmap_end(&args); - return 0; -} - -static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, - pgprot_t *pgprot) -{ - unsigned long prot; - - VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT)); - - /* - * We need the starting PFN and cachemode used for track_pfn_remap() - * that covered the whole VMA. For most mappings, we can obtain that - * information from the page tables. For COW mappings, we might now - * suddenly have anon folios mapped and follow_phys() will fail. - * - * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to - * detect the PFN. If we need the cachemode as well, we're out of luck - * for now and have to fail fork(). - */ - if (!follow_phys(vma, &prot, paddr)) { - if (pgprot) - *pgprot = __pgprot(prot); - return 0; - } - if (is_cow_mapping(vma->vm_flags)) { - if (pgprot) - return -EINVAL; - *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; - return 0; - } - WARN_ON_ONCE(1); - return -EINVAL; -} - -int track_pfn_copy(struct vm_area_struct *dst_vma, - struct vm_area_struct *src_vma, unsigned long *pfn) -{ - const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start; - resource_size_t paddr; - pgprot_t pgprot; - int rc; - - if (!(src_vma->vm_flags & VM_PAT)) - return 0; - - /* - * Duplicate the PAT information for the dst VMA based on the src - * VMA. - */ - if (get_pat_info(src_vma, &paddr, &pgprot)) - return -EINVAL; - rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1); - if (rc) - return rc; - - /* Reservation for the destination VMA succeeded. */ - vm_flags_set(dst_vma, VM_PAT); - *pfn = PHYS_PFN(paddr); - return 0; -} - -void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn) -{ - untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true); - /* - * Reservation was freed, any copied page tables will get cleaned - * up later, but without getting PAT involved again. - */ -} - -/* - * prot is passed in as a parameter for the new mapping. If the vma has - * a linear pfn mapping for the entire range, or no vma is provided, - * reserve the entire pfn + size range with single reserve_pfn_range - * call. 
- */ -int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long addr, unsigned long size) -{ - resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; - - /* reserve the whole chunk starting from paddr */ - if (!vma || (addr == vma->vm_start - && size == (vma->vm_end - vma->vm_start))) { - int ret; - - ret = reserve_pfn_range(paddr, size, prot, 0); - if (ret == 0 && vma) - vm_flags_set(vma, VM_PAT); - return ret; - } - - return pfnmap_setup_cachemode(pfn, size, prot); -} - int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; @@ -1082,40 +969,6 @@ void pfnmap_untrack(unsigned long pfn, unsigned long size) free_pfn_range(paddr, size); } -/* - * untrack_pfn is called while unmapping a pfnmap for a region. - * untrack can be called for a specific region indicated by pfn and size or - * can be for the entire vma (in which case pfn, size are zero). - */ -void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, - unsigned long size, bool mm_wr_locked) -{ - resource_size_t paddr; - - if (vma && !(vma->vm_flags & VM_PAT)) - return; - - /* free the chunk starting from pfn or the whole chunk */ - paddr = (resource_size_t)pfn << PAGE_SHIFT; - if (!paddr && !size) { - if (get_pat_info(vma, &paddr, NULL)) - return; - size = vma->vm_end - vma->vm_start; - } - free_pfn_range(paddr, size); - if (vma) { - if (mm_wr_locked) - vm_flags_clear(vma, VM_PAT); - else - __vm_flags_mod(vma, 0, VM_PAT); - } -} - -void untrack_pfn_clear(struct vm_area_struct *vma) -{ - vm_flags_clear(vma, VM_PAT); -} - pgprot_t pgprot_writecombine(pgprot_t prot) { pgprot_set_cachemode(&prot, _PAGE_CACHE_MODE_WC); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 90f72cd35839..0b6e1f781d86 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1485,17 +1485,6 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) * vmf_insert_pfn. */ -/* - * track_pfn_remap is called when a _new_ pfn mapping is being established - * by remap_pfn_range() for physical range indicated by pfn and size. - */ -static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long addr, - unsigned long size) -{ - return 0; -} - static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot) { @@ -1511,55 +1500,7 @@ static inline int pfnmap_track(unsigned long pfn, unsigned long size, static inline void pfnmap_untrack(unsigned long pfn, unsigned long size) { } - -/* - * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page - * tables copied during copy_page_range(). Will store the pfn to be - * passed to untrack_pfn_copy() only if there is something to be untracked. - * Callers should initialize the pfn to 0. - */ -static inline int track_pfn_copy(struct vm_area_struct *dst_vma, - struct vm_area_struct *src_vma, unsigned long *pfn) -{ - return 0; -} - -/* - * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during - * copy_page_range(), but after track_pfn_copy() was already called. Can - * be called even if track_pfn_copy() did not actually track anything: - * handled internally. - */ -static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma, - unsigned long pfn) -{ -} - -/* - * untrack_pfn is called while unmapping a pfnmap for a region. 
- * untrack can be called for a specific region indicated by pfn and size or - * can be for the entire vma (in which case pfn, size are zero). - */ -static inline void untrack_pfn(struct vm_area_struct *vma, - unsigned long pfn, unsigned long size, - bool mm_wr_locked) -{ -} - -/* - * untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA: - * - * 1) During mremap() on the src VMA after the page tables were moved. - * 2) During fork() on the dst VMA, immediately after duplicating the src VMA. - */ -static inline void untrack_pfn_clear(struct vm_area_struct *vma) -{ -} #else -extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long addr, - unsigned long size); - /** * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range * @pfn: the start of the pfn range @@ -1614,13 +1555,6 @@ int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot); * Untrack a pfn range previously tracked through pfnmap_track(). */ void pfnmap_untrack(unsigned long pfn, unsigned long size); -extern int track_pfn_copy(struct vm_area_struct *dst_vma, - struct vm_area_struct *src_vma, unsigned long *pfn); -extern void untrack_pfn_copy(struct vm_area_struct *dst_vma, - unsigned long pfn); -extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, - unsigned long size, bool mm_wr_locked); -extern void untrack_pfn_clear(struct vm_area_struct *vma); #endif /** -- 2.51.0 From cba4dbeb7bfcf8b69d491288c3bf877ead883214 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:19 +0200 Subject: [PATCH 09/16] mm: remove VM_PAT It's unused, so let's remove it. Link: https://lkml.kernel.org/r/20250512123424.637989-7-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. 
Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mm.h | 4 +--- include/trace/events/mmflags.h | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 43748c8f3454..a916ea42cfd5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -357,9 +357,7 @@ extern unsigned int kobjsize(const void *objp); # define VM_SHADOW_STACK VM_NONE #endif -#if defined(CONFIG_X86) -# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ -#elif defined(CONFIG_PPC64) +#if defined(CONFIG_PPC64) # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 15aae955a10b..aa441f593e9a 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -172,9 +172,7 @@ IF_HAVE_PG_ARCH_3(arch_3) __def_pageflag_names \ ) : "none" -#if defined(CONFIG_X86) -#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } -#elif defined(CONFIG_PPC64) +#if defined(CONFIG_PPC64) #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } #elif defined(CONFIG_PARISC) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } -- 2.51.0 From b3662fb91b98fc9503589ea98634fd48eee19426 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:20 +0200 Subject: [PATCH 10/16] x86/mm/pat: remove strict_prot parameter from reserve_pfn_range() Always set to 0, so let's remove it. Link: https://lkml.kernel.org/r/20250512123424.637989-8-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index c88d1cbdc1de..ccc55c00b4c8 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -858,8 +858,7 @@ int memtype_kernel_map_sync(u64 base, unsigned long size, * Reserved non RAM regions only and after successful memtype_reserve, * this func also keeps identity mapping (if any) in sync with this new prot. 
*/ -static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, - int strict_prot) +static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot) { int is_ram = 0; int ret; @@ -895,8 +894,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, return ret; if (pcm != want_pcm) { - if (strict_prot || - !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { + if (!is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { memtype_free(paddr, paddr + size); pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, @@ -906,10 +904,6 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, cattr_name(pcm)); return -EINVAL; } - /* - * We allow returning different type than the one requested in - * non strict case. - */ pgprot_set_cachemode(vma_prot, pcm); } @@ -959,7 +953,7 @@ int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot) { const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; - return reserve_pfn_range(paddr, size, prot, 0); + return reserve_pfn_range(paddr, size, prot); } void pfnmap_untrack(unsigned long pfn, unsigned long size) -- 2.51.0 From 81baf8450165ac57cca52f73b541a1056a2ca676 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:21 +0200 Subject: [PATCH 11/16] x86/mm/pat: remove MEMTYPE_*_MATCH The "memramp() shrinking" scenario no longer applies, so let's remove that now-unnecessary handling. Link: https://lkml.kernel.org/r/20250512123424.637989-9-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype_interval.c | 44 ++++-------------------------- 1 file changed, 6 insertions(+), 38 deletions(-) diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c index 645613d59942..9d03f0dbc471 100644 --- a/arch/x86/mm/pat/memtype_interval.c +++ b/arch/x86/mm/pat/memtype_interval.c @@ -49,26 +49,15 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end, static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED; -enum { - MEMTYPE_EXACT_MATCH = 0, - MEMTYPE_END_MATCH = 1 -}; - -static struct memtype *memtype_match(u64 start, u64 end, int match_type) +static struct memtype *memtype_match(u64 start, u64 end) { struct memtype *entry_match; entry_match = interval_iter_first(&memtype_rbroot, start, end-1); while (entry_match != NULL && entry_match->start < end) { - if ((match_type == MEMTYPE_EXACT_MATCH) && - (entry_match->start == start) && (entry_match->end == end)) - return entry_match; - - if ((match_type == MEMTYPE_END_MATCH) && - (entry_match->start < start) && (entry_match->end == end)) + if (entry_match->start == start && entry_match->end == end) return entry_match; - entry_match = interval_iter_next(entry_match, start, end-1); } @@ -132,32 +121,11 @@ struct memtype *memtype_erase(u64 start, u64 end) { struct memtype *entry_old; - /* - * Since the memtype_rbroot tree allows overlapping ranges, - * memtype_erase() checks with EXACT_MATCH first, i.e. free - * a whole node for the munmap case. 
If no such entry is found, - * it then checks with END_MATCH, i.e. shrink the size of a node - * from the end for the mremap case. - */ - entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH); - if (!entry_old) { - entry_old = memtype_match(start, end, MEMTYPE_END_MATCH); - if (!entry_old) - return ERR_PTR(-EINVAL); - } - - if (entry_old->start == start) { - /* munmap: erase this node */ - interval_remove(entry_old, &memtype_rbroot); - } else { - /* mremap: update the end value of this node */ - interval_remove(entry_old, &memtype_rbroot); - entry_old->end = start; - interval_insert(entry_old, &memtype_rbroot); - - return NULL; - } + entry_old = memtype_match(start, end); + if (!entry_old) + return ERR_PTR(-EINVAL); + interval_remove(entry_old, &memtype_rbroot); return entry_old; } -- 2.51.0 From 99e27b047c4ce17feed4bb5c5cb2f1521470afcd Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:22 +0200 Subject: [PATCH 12/16] x86/mm/pat: inline memtype_match() into memtype_erase() Let's just have it in a single function. The resulting function is certainly small enough and readable. Link: https://lkml.kernel.org/r/20250512123424.637989-10-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype_interval.c | 33 +++++++++--------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c index 9d03f0dbc471..e5844ed1311e 100644 --- a/arch/x86/mm/pat/memtype_interval.c +++ b/arch/x86/mm/pat/memtype_interval.c @@ -49,21 +49,6 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end, static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED; -static struct memtype *memtype_match(u64 start, u64 end) -{ - struct memtype *entry_match; - - entry_match = interval_iter_first(&memtype_rbroot, start, end-1); - - while (entry_match != NULL && entry_match->start < end) { - if (entry_match->start == start && entry_match->end == end) - return entry_match; - entry_match = interval_iter_next(entry_match, start, end-1); - } - - return NULL; /* Returns NULL if there is no match */ -} - static int memtype_check_conflict(u64 start, u64 end, enum page_cache_mode reqtype, enum page_cache_mode *newtype) @@ -119,14 +104,16 @@ int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_ty struct memtype *memtype_erase(u64 start, u64 end) { - struct memtype *entry_old; - - entry_old = memtype_match(start, end); - if (!entry_old) - return ERR_PTR(-EINVAL); - - interval_remove(entry_old, &memtype_rbroot); - return entry_old; + struct memtype *entry = interval_iter_first(&memtype_rbroot, start, end - 1); + + while (entry && entry->start < end) { + if (entry->start == start && entry->end == end) { + interval_remove(entry, &memtype_rbroot); + return entry; + } + entry = interval_iter_next(entry, start, end - 1); + } + return ERR_PTR(-EINVAL); } struct memtype *memtype_lookup(u64 addr) -- 2.51.0 From 11c82e71817779fc002d47c5beb6ad9df444289a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:23 +0200 Subject: [PATCH 13/16] drm/i915: 
track_pfn() -> "pfnmap tracking" track_pfn() does not exist, let's simply refer to it as "pfnmap tracking". Link: https://lkml.kernel.org/r/20250512123424.637989-11-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- drivers/gpu/drm/i915/i915_mm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 76e2801619f0..c33bd3d83069 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma, GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ r.mm = vma->vm_mm; r.pfn = pfn; r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | @@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma, }; int err; - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); while (offset >= r.sgt.max >> PAGE_SHIFT) { -- 2.51.0 From a624c424d5d3778863ac67f87a374c22b1520c27 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 12 May 2025 14:34:24 +0200 Subject: [PATCH 14/16] mm/io-mapping: track_pfn() -> "pfnmap tracking" track_pfn() does not exist, let's simply refer to it as "pfnmap tracking". Link: https://lkml.kernel.org/r/20250512123424.637989-12-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Acked-by: Ingo Molnar [x86 bits] Reviewed-by: Liam R. Howlett Cc: Andy Lutomirski Cc: Borislav Betkov Cc: Dave Airlie Cc: "H. Peter Anvin" Cc: Jani Nikula Cc: Jann Horn Cc: Jonas Lahtinen Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Peter Xu Cc: Peter Zijlstra Cc: Rodrigo Vivi Cc: Steven Rostedt Cc: Thomas Gleinxer Cc: Tvrtko Ursulin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/io-mapping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/io-mapping.c b/mm/io-mapping.c index f44a6a134712..d3586e95c12c 100644 --- a/mm/io-mapping.c +++ b/mm/io-mapping.c @@ -24,7 +24,7 @@ int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, pgprot_t remap_prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ return remap_pfn_range_notrack(vma, addr, pfn, size, remap_prot); } EXPORT_SYMBOL_GPL(io_mapping_map_user); -- 2.51.0 From 5053383829ab2b17dc4766a832004c848f4af9df Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 May 2025 10:57:11 +0800 Subject: [PATCH 15/16] mm: khugepaged: convert set_huge_pmd() to take a folio We've already gotten the stable locked folio in collapse_pte_mapped_thp(), so just use folio for set_huge_pmd() to set the PMD entry, which is more straightforward. 
Moreover, we will check the folio size in do_set_pmd(), so we can remove the unnecessary VM_BUG_ON() in set_huge_pmd(). While we are at it, we can also remove the PageTransHuge(), as it currently has no callers. Link: https://lkml.kernel.org/r/110c3e1ec5fe7854a0e2c95ffcbc985817180ed7.1747017104.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Cc: Dev Jain Cc: Johannes Weiner Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Mariano Pache Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Mike Rapoport Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 15 --------------- mm/khugepaged.c | 11 +++++------ 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 37b11f15dbd9..1c1d49554c71 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -907,20 +907,6 @@ FOLIO_FLAG_FALSE(partially_mapped) #define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE -/* - * PageHuge() only returns true for hugetlbfs pages, but not for - * normal or transparent huge pages. - * - * PageTransHuge() returns true for both transparent huge and - * hugetlbfs pages, but not normal pages. PageTransHuge() can only be - * called only in the core VM paths where hugetlbfs pages can't exist. - */ -static inline int PageTransHuge(const struct page *page) -{ - VM_BUG_ON_PAGE(PageTail(page), page); - return PageHead(page); -} - /* * PageTransCompound returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known @@ -931,7 +917,6 @@ static inline int PageTransCompound(const struct page *page) return PageCompound(page); } #else -TESTPAGEFLAG_FALSE(TransHuge, transhuge) TESTPAGEFLAG_FALSE(TransCompound, transcompound) #endif diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b04b6a770afe..33daea8f667e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1465,9 +1465,9 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot) } #ifdef CONFIG_SHMEM -/* hpage must be locked, and mmap_lock must be held */ +/* folio must be locked, and mmap_lock must be held */ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmdp, struct page *hpage) + pmd_t *pmdp, struct folio *folio, struct page *page) { struct vm_fault vmf = { .vma = vma, @@ -1476,13 +1476,12 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, .pmd = pmdp, }; - VM_BUG_ON(!PageTransHuge(hpage)); mmap_assert_locked(vma->vm_mm); - if (do_set_pmd(&vmf, hpage)) + if (do_set_pmd(&vmf, page)) return SCAN_FAIL; - get_page(hpage); + folio_get(folio); return SCAN_SUCCEED; } @@ -1689,7 +1688,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, maybe_install_pmd: /* step 5: install pmd entry */ result = install_pmd - ? set_huge_pmd(vma, haddr, pmd, &folio->page) + ? set_huge_pmd(vma, haddr, pmd, folio, &folio->page) : SCAN_SUCCEED; goto drop_folio; abort: -- 2.51.0 From 698c0089cdf0b27d37f0b24824b682c23de5f72d Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 May 2025 10:57:12 +0800 Subject: [PATCH 16/16] mm: convert do_set_pmd() to take a folio In do_set_pmd(), we always use the folio->page to build PMD mappings for the entire folio. Since all callers of do_set_pmd() already hold a stable folio, converting do_set_pmd() to take a folio is safe and more straightforward. 
In addition, to ensure the extensibility of do_set_pmd() for supporting larger folios beyond PMD size, we keep the 'page' parameter to specify which page within the folio should be mapped. No functional changes expected. Link: https://lkml.kernel.org/r/9b488f4ecb4d3fd8634e3d448dd0ed6964482480.1747017104.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Zi Yan Acked-by: David Hildenbrand Cc: Dev Jain Cc: Johannes Weiner Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Mariano Pache Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Mike Rapoport Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 +- mm/filemap.c | 2 +- mm/khugepaged.c | 2 +- mm/memory.c | 11 +++++------ 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index a916ea42cfd5..cd2e513189d6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1235,7 +1235,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); +vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page); void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr); diff --git a/mm/filemap.c b/mm/filemap.c index 7b90cbeb4a1a..09d005848f0d 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3533,7 +3533,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { struct page *page = folio_file_page(folio, start); - vm_fault_t ret = do_set_pmd(vmf, page); + vm_fault_t ret = do_set_pmd(vmf, folio, page); if (!ret) { /* The page is mapped successfully, reference consumed. */ folio_unlock(folio); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 33daea8f667e..ebcd7c8a4b44 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1478,7 +1478,7 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, mmap_assert_locked(vma->vm_mm); - if (do_set_pmd(&vmf, page)) + if (do_set_pmd(&vmf, folio, page)) return SCAN_FAIL; folio_get(folio); diff --git a/mm/memory.c b/mm/memory.c index 4cf4adb0de26..5cb48f262ab0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5227,9 +5227,8 @@ static void deposit_prealloc_pte(struct vm_fault *vmf) vmf->prealloc_pte = NULL; } -vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) +vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) { - struct folio *folio = page_folio(page); struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; @@ -5302,7 +5301,7 @@ out: return ret; } #else -vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) +vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) { return VM_FAULT_FALLBACK; } @@ -5396,6 +5395,7 @@ fallback: else page = vmf->page; + folio = page_folio(page); /* * check even for read faults because we might have lost our CoWed * page @@ -5407,8 +5407,8 @@ fallback: } if (pmd_none(*vmf->pmd)) { - if (PageTransCompound(page)) { - ret = do_set_pmd(vmf, page); + if (folio_test_pmd_mappable(folio)) { + ret = do_set_pmd(vmf, folio, page); if (ret != VM_FAULT_FALLBACK) return ret; } @@ -5419,7 +5419,6 @@ fallback: return VM_FAULT_OOM; } - folio = page_folio(page); nr_pages = folio_nr_pages(folio); /* -- 2.51.0
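[Editor's note] To recap the behavioural change made by the two memtype_interval.c patches above ("x86/mm/pat: remove MEMTYPE_*_MATCH" and "x86/mm/pat: inline memtype_match() into memtype_erase()"), the sketch below models the new erase semantics in plain userspace C: an entry is freed only on an exact [start, end) match, and the old MEMTYPE_END_MATCH "shrink the node from the end" case now fails. This is an illustration only, not kernel code: the linked list, the struct and function names, and the demo ranges are hypothetical stand-ins for the kernel's interval tree.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct memtype; the kernel tracks these in an
 * interval tree, but a singly linked list is enough for the demo. */
struct memtype_model {
	uint64_t start, end;
	struct memtype_model *next;
};

static struct memtype_model *entries;

static void model_reserve(uint64_t start, uint64_t end)
{
	struct memtype_model *e = malloc(sizeof(*e));

	if (!e)
		exit(1);
	e->start = start;
	e->end = end;
	e->next = entries;
	entries = e;
}

/* Mirrors the simplified memtype_erase(): exact-match lookup only. */
static int model_erase(uint64_t start, uint64_t end)
{
	struct memtype_model **p;

	for (p = &entries; *p; p = &(*p)->next) {
		if ((*p)->start == start && (*p)->end == end) {
			struct memtype_model *e = *p;

			*p = e->next;
			free(e);
			return 0;
		}
	}
	return -EINVAL;
}

int main(void)
{
	/* One tracked range covering two 4 KiB pages. */
	model_reserve(0x0, 0x2000);

	/* The old END_MATCH behaviour (mremap-style shrink by freeing only
	 * the tail [0x1000, 0x2000)) is no longer accepted ... */
	printf("end-only match: %d\n", model_erase(0x1000, 0x2000));

	/* ... only the exact range that was reserved can be erased. */
	printf("exact match:    %d\n", model_erase(0x0, 0x2000));
	return 0;
}

Expected output is -22 (-EINVAL) for the partial range and 0 for the exact range, matching the reworked memtype_erase(), which returns ERR_PTR(-EINVAL) unless the requested range matches a tracked entry exactly.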