From 3673167a3a07f25b3f06754d69f406edea65543a Mon Sep 17 00:00:00 2001 From: Edward Liaw Date: Fri, 18 Oct 2024 17:17:23 +0000 Subject: [PATCH 01/16] Revert "selftests/mm: replace atomic_bool with pthread_barrier_t" This reverts commit e61ef21e27e8deed8c474e9f47f4aa7bc37e138c. uffd_poll_thread may be called by other tests that do not initialize the pthread_barrier, so this approach is not correct. This will revert to using atomic_bool instead. Link: https://lkml.kernel.org/r/20241018171734.2315053-3-edliaw@google.com Fixes: e61ef21e27e8 ("selftests/mm: replace atomic_bool with pthread_barrier_t") Signed-off-by: Edward Liaw Cc: Ryan Roberts Cc: Peter Xu Cc: Shuah Khan Cc: Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-common.c | 5 ++--- tools/testing/selftests/mm/uffd-common.h | 3 ++- tools/testing/selftests/mm/uffd-unit-tests.c | 14 ++++++-------- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 852e7281026e..717539eddf98 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -18,7 +18,7 @@ bool test_uffdio_wp = true; unsigned long long *count_verify; uffd_test_ops_t *uffd_test_ops; uffd_test_case_ops_t *uffd_test_case_ops; -pthread_barrier_t ready_for_fork; +atomic_bool ready_for_fork; static int uffd_mem_fd_create(off_t mem_size, bool hugetlb) { @@ -519,8 +519,7 @@ void *uffd_poll_thread(void *arg) pollfd[1].fd = pipefd[cpu*2]; pollfd[1].events = POLLIN; - /* Ready for parent thread to fork */ - pthread_barrier_wait(&ready_for_fork); + ready_for_fork = true; for (;;) { ret = poll(pollfd, 2, -1); diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h index 3e6228d8e0dc..a70ae10b5f62 100644 --- a/tools/testing/selftests/mm/uffd-common.h +++ b/tools/testing/selftests/mm/uffd-common.h @@ -33,6 +33,7 @@ #include #include #include +#include #include "../kselftest.h" #include "vm_util.h" @@ -104,7 +105,7 @@ extern bool map_shared; extern bool test_uffdio_wp; extern unsigned long long *count_verify; extern volatile bool test_uffdio_copy_eexist; -extern pthread_barrier_t ready_for_fork; +extern atomic_bool ready_for_fork; extern uffd_test_ops_t anon_uffd_test_ops; extern uffd_test_ops_t shmem_uffd_test_ops; diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 3db2296ac631..b3d21eed203d 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -774,7 +774,7 @@ static void uffd_sigbus_test_common(bool wp) char c; struct uffd_args args = { 0 }; - pthread_barrier_init(&ready_for_fork, NULL, 2); + ready_for_fork = false; fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK); @@ -791,9 +791,8 @@ static void uffd_sigbus_test_common(bool wp) if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args)) err("uffd_poll_thread create"); - /* Wait for child thread to start before forking */ - pthread_barrier_wait(&ready_for_fork); - pthread_barrier_destroy(&ready_for_fork); + while (!ready_for_fork) + ; /* Wait for the poll_thread to start executing before forking */ pid = fork(); if (pid < 0) @@ -834,7 +833,7 @@ static void uffd_events_test_common(bool wp) char c; struct uffd_args args = { 0 }; - pthread_barrier_init(&ready_for_fork, NULL, 2); + ready_for_fork = false; fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK); if (uffd_register(uffd, area_dst, nr_pages * page_size, @@ -845,9 +844,8 @@ 
static void uffd_events_test_common(bool wp) if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args)) err("uffd_poll_thread create"); - /* Wait for child thread to start before forking */ - pthread_barrier_wait(&ready_for_fork); - pthread_barrier_destroy(&ready_for_fork); + while (!ready_for_fork) + ; /* Wait for the poll_thread to start executing before forking */ pid = fork(); if (pid < 0) -- 2.51.0 From f2330b650e97a68c1afce66305f10651a9544037 Mon Sep 17 00:00:00 2001 From: Edward Liaw Date: Fri, 18 Oct 2024 17:17:24 +0000 Subject: [PATCH 02/16] selftests/mm: fix deadlock for fork after pthread_create with atomic_bool Some additional synchronization is needed on Android ARM64; we see a deadlock with pthread_create when the parent thread races forward before the child has a chance to start doing work. Link: https://lkml.kernel.org/r/20241018171734.2315053-4-edliaw@google.com Fixes: cff294582798 ("selftests/mm: extend and rename uffd pagemap test") Signed-off-by: Edward Liaw Cc: Ryan Roberts Cc: Peter Xu Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-unit-tests.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index b3d21eed203d..a2e71b1636e7 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -241,6 +241,8 @@ static void *fork_event_consumer(void *data) fork_event_args *args = data; struct uffd_msg msg = { 0 }; + ready_for_fork = true; + /* Read until a full msg received */ while (uffd_read_msg(args->parent_uffd, &msg)); @@ -308,8 +310,11 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin) /* Prepare a thread to resolve EVENT_FORK */ if (with_event) { + ready_for_fork = false; if (pthread_create(&thread, NULL, fork_event_consumer, &args)) err("pthread_create()"); + while (!ready_for_fork) + ; /* Wait for the poll_thread to start executing before forking */ } child = fork(); -- 2.51.0 From 58a039e679fe72bd0efa8b2abe669a7914bb4429 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 18 Oct 2024 18:14:15 +0200 Subject: [PATCH 03/16] mm: split critical region in remap_file_pages() and invoke LSMs in between Commit ea7e2d5e49c0 ("mm: call the security_mmap_file() LSM hook in remap_file_pages()") fixed a security issue, it added an LSM check when trying to remap file pages, so that LSMs have the opportunity to evaluate such action like for other memory operations such as mmap() and mprotect(). However, that commit called security_mmap_file() inside the mmap_lock lock, while the other calls do it before taking the lock, after commit 8b3ec6814c83 ("take security_mmap_file() outside of ->mmap_sem"). This caused lock inversion issue with IMA which was taking the mmap_lock and i_mutex lock in the opposite way when the remap_file_pages() system call was called. Solve the issue by splitting the critical region in remap_file_pages() in two regions: the first takes a read lock of mmap_lock, retrieves the VMA and the file descriptor associated, and calculates the 'prot' and 'flags' variables; the second takes a write lock on mmap_lock, checks that the VMA flags and the VMA file descriptor are the same as the ones obtained in the first critical region (otherwise the system call fails), and calls do_mmap(). In between, after releasing the read lock and before taking the write lock, call security_mmap_file(), and solve the lock inversion issue. 
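In outline, the reworked remap_file_pages() flow looks roughly like this
(a simplified sketch of the change below; the _killable lock variants,
error unwinding and the exact prot/flags derivation are omitted):

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	/* derive prot and flags from vma->vm_flags, save vm_flags,
	 * and take a reference on vma->vm_file with get_file()
	 */
	mmap_read_unlock(mm);

	ret = security_mmap_file(file, prot, flags);	/* no mmap_lock held */
	if (ret)
		return ret;	/* after fput(file) */

	mmap_write_lock(mm);
	vma = vma_lookup(mm, start);
	/* fail if vma->vm_flags or vma->vm_file changed in the meantime */
	ret = do_mmap(vma->vm_file, start, size, prot, flags, 0, pgoff,
		      &populate, NULL);
	mmap_write_unlock(mm);
	fput(file);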
Link: https://lkml.kernel.org/r/20241018161415.3845146-1-roberto.sassu@huaweicloud.com Fixes: ea7e2d5e49c0 ("mm: call the security_mmap_file() LSM hook in remap_file_pages()") Signed-off-by: Kirill A. Shutemov Signed-off-by: Roberto Sassu Reported-by: syzbot+1cd571a672400ef3a930@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-security-module/66f7b10e.050a0220.46d20.0036.GAE@google.com/ Tested-by: Roberto Sassu Reviewed-by: Roberto Sassu Reviewed-by: Jann Horn Reviewed-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Reviewed-by: Paul Moore Tested-by: syzbot+1cd571a672400ef3a930@syzkaller.appspotmail.com Cc: Jarkko Sakkinen Cc: Dmitry Kasatkin Cc: Eric Snowberg Cc: James Morris Cc: Mimi Zohar Cc: "Serge E. Hallyn" Cc: Shu Han Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/mmap.c | 69 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 17 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 582036922d05..1e0e34cb993f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1642,6 +1642,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; + vm_flags_t vm_flags; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", current->comm, current->pid); @@ -1658,12 +1659,60 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; - if (mmap_write_lock_killable(mm)) + if (mmap_read_lock_killable(mm)) return -EINTR; + /* + * Look up VMA under read lock first so we can perform the security + * without holding locks (which can be problematic). We reacquire a + * write lock later and check nothing changed underneath us. + */ vma = vma_lookup(mm, start); - if (!vma || !(vma->vm_flags & VM_SHARED)) + if (!vma || !(vma->vm_flags & VM_SHARED)) { + mmap_read_unlock(mm); + return -EINVAL; + } + + prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; + prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; + prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; + + flags &= MAP_NONBLOCK; + flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; + if (vma->vm_flags & VM_LOCKED) + flags |= MAP_LOCKED; + + /* Save vm_flags used to calculate prot and flags, and recheck later. */ + vm_flags = vma->vm_flags; + file = get_file(vma->vm_file); + + mmap_read_unlock(mm); + + /* Call outside mmap_lock to be consistent with other callers. */ + ret = security_mmap_file(file, prot, flags); + if (ret) { + fput(file); + return ret; + } + + ret = -EINVAL; + + /* OK security check passed, take write lock + let it rip. */ + if (mmap_write_lock_killable(mm)) { + fput(file); + return -EINTR; + } + + vma = vma_lookup(mm, start); + + if (!vma) + goto out; + + /* Make sure things didn't change under us. */ + if (vma->vm_flags != vm_flags) + goto out; + if (vma->vm_file != file) goto out; if (start + size > vma->vm_end) { @@ -1691,25 +1740,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, goto out; } - prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; - prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; - prot |= vma->vm_flags & VM_EXEC ? 
PROT_EXEC : 0; - - flags &= MAP_NONBLOCK; - flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; - if (vma->vm_flags & VM_LOCKED) - flags |= MAP_LOCKED; - - file = get_file(vma->vm_file); - ret = security_mmap_file(vma->vm_file, prot, flags); - if (ret) - goto out_fput; ret = do_mmap(vma->vm_file, start, size, prot, flags, 0, pgoff, &populate, NULL); -out_fput: - fput(file); out: mmap_write_unlock(mm); + fput(file); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) -- 2.51.0 From 183430079869fcb4b2967800d7659bbeb6052d07 Mon Sep 17 00:00:00 2001 From: Jeff Xu Date: Tue, 8 Oct 2024 04:09:41 +0000 Subject: [PATCH 04/16] mseal: update mseal.rst MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Pedro Falcato's optimization [1] for checking sealed VMAs, which replaces the can_modify_mm() function with an in-loop check, necessitates an update to the mseal.rst documentation to reflect this change. Furthermore, the document has received offline comments regarding the code sample and suggestions for sentence clarification to enhance reader comprehension. [1] https://lore.kernel.org/linux-mm/20240817-mseal-depessimize-v3-0-d8d2e037df30@gmail.com/ Update doc after in-loop change: mprotect/madvise can have partially updated and munmap is atomic. Fix indentation and clarify some sections to improve readability. Link: https://lkml.kernel.org/r/20241008040942.1478931-2-jeffxu@chromium.org Fixes: df2a7df9a9aa ("mm/munmap: replace can_modify_mm with can_modify_vma") Fixes: 4a2dd02b0916 ("mm/mprotect: replace can_modify_mm with can_modify_vma") Fixes: 38075679b5f1 ("mm/mremap: replace can_modify_mm with can_modify_vma") Fixes: 23c57d1fa2b9 ("mseal: replace can_modify_mm_madv with a vma variant") Signed-off-by: Jeff Xu Reviewed-by: Randy Dunlap Cc: Elliott Hughes Cc: Greg Kroah-Hartman Cc: Guenter Roeck Cc: Jann Horn Cc: Jonathan Corbet Cc: Jorge Lucangeli Obes Cc: Kees Cook Cc: "Liam R. Howlett" Cc: Linus Torvalds Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Muhammad Usama Anjum Cc: Pedro Falcato Cc: Stephen Röttger Cc: Suren Baghdasaryan Cc: "Theo de Raadt" Signed-off-by: Andrew Morton --- Documentation/userspace-api/mseal.rst | 307 +++++++++++++------------- 1 file changed, 148 insertions(+), 159 deletions(-) diff --git a/Documentation/userspace-api/mseal.rst b/Documentation/userspace-api/mseal.rst index 4132eec995a3..41102f74c5e2 100644 --- a/Documentation/userspace-api/mseal.rst +++ b/Documentation/userspace-api/mseal.rst @@ -23,177 +23,166 @@ applications can additionally seal security critical data at runtime. A similar feature already exists in the XNU kernel with the VM_FLAGS_PERMANENT flag [1] and on OpenBSD with the mimmutable syscall [2]. -User API -======== -mseal() ------------ -The mseal() syscall has the following signature: - -``int mseal(void addr, size_t len, unsigned long flags)`` - -**addr/len**: virtual memory address range. - -The address range set by ``addr``/``len`` must meet: - - The start address must be in an allocated VMA. - - The start address must be page aligned. - - The end address (``addr`` + ``len``) must be in an allocated VMA. - - no gap (unallocated memory) between start and end address. - -The ``len`` will be paged aligned implicitly by the kernel. - -**flags**: reserved for future use. - -**return values**: - -- ``0``: Success. - -- ``-EINVAL``: - - Invalid input ``flags``. - - The start address (``addr``) is not page aligned. - - Address range (``addr`` + ``len``) overflow. 
- -- ``-ENOMEM``: - - The start address (``addr``) is not allocated. - - The end address (``addr`` + ``len``) is not allocated. - - A gap (unallocated memory) between start and end address. - -- ``-EPERM``: - - sealing is supported only on 64-bit CPUs, 32-bit is not supported. - -- For above error cases, users can expect the given memory range is - unmodified, i.e. no partial update. - -- There might be other internal errors/cases not listed here, e.g. - error during merging/splitting VMAs, or the process reaching the max - number of supported VMAs. In those cases, partial updates to the given - memory range could happen. However, those cases should be rare. - -**Blocked operations after sealing**: - Unmapping, moving to another location, and shrinking the size, - via munmap() and mremap(), can leave an empty space, therefore - can be replaced with a VMA with a new set of attributes. - - Moving or expanding a different VMA into the current location, - via mremap(). - - Modifying a VMA via mmap(MAP_FIXED). - - Size expansion, via mremap(), does not appear to pose any - specific risks to sealed VMAs. It is included anyway because - the use case is unclear. In any case, users can rely on - merging to expand a sealed VMA. - - mprotect() and pkey_mprotect(). - - Some destructive madvice() behaviors (e.g. MADV_DONTNEED) - for anonymous memory, when users don't have write permission to the - memory. Those behaviors can alter region contents by discarding pages, - effectively a memset(0) for anonymous memory. - - Kernel will return -EPERM for blocked operations. - - For blocked operations, one can expect the given address is unmodified, - i.e. no partial update. Note, this is different from existing mm - system call behaviors, where partial updates are made till an error is - found and returned to userspace. To give an example: - - Assume following code sequence: - - - ptr = mmap(null, 8192, PROT_NONE); - - munmap(ptr + 4096, 4096); - - ret1 = mprotect(ptr, 8192, PROT_READ); - - mseal(ptr, 4096); - - ret2 = mprotect(ptr, 8192, PROT_NONE); - - ret1 will be -ENOMEM, the page from ptr is updated to PROT_READ. - - ret2 will be -EPERM, the page remains to be PROT_READ. - -**Note**: - -- mseal() only works on 64-bit CPUs, not 32-bit CPU. - -- users can call mseal() multiple times, mseal() on an already sealed memory - is a no-action (not error). - -- munseal() is not supported. - -Use cases: -========== +SYSCALL +======= +mseal syscall signature +----------------------- + ``int mseal(void \* addr, size_t len, unsigned long flags)`` + + **addr**/**len**: virtual memory address range. + The address range set by **addr**/**len** must meet: + - The start address must be in an allocated VMA. + - The start address must be page aligned. + - The end address (**addr** + **len**) must be in an allocated VMA. + - no gap (unallocated memory) between start and end address. + + The ``len`` will be paged aligned implicitly by the kernel. + + **flags**: reserved for future use. + + **Return values**: + - **0**: Success. + - **-EINVAL**: + * Invalid input ``flags``. + * The start address (``addr``) is not page aligned. + * Address range (``addr`` + ``len``) overflow. + - **-ENOMEM**: + * The start address (``addr``) is not allocated. + * The end address (``addr`` + ``len``) is not allocated. + * A gap (unallocated memory) between start and end address. + - **-EPERM**: + * sealing is supported only on 64-bit CPUs, 32-bit is not supported. 
+ + **Note about error return**: + - For above error cases, users can expect the given memory range is + unmodified, i.e. no partial update. + - There might be other internal errors/cases not listed here, e.g. + error during merging/splitting VMAs, or the process reaching the maximum + number of supported VMAs. In those cases, partial updates to the given + memory range could happen. However, those cases should be rare. + + **Architecture support**: + mseal only works on 64-bit CPUs, not 32-bit CPUs. + + **Idempotent**: + users can call mseal multiple times. mseal on an already sealed memory + is a no-action (not error). + + **no munseal** + Once mapping is sealed, it can't be unsealed. The kernel should never + have munseal, this is consistent with other sealing feature, e.g. + F_SEAL_SEAL for file. + +Blocked mm syscall for sealed mapping +------------------------------------- + It might be important to note: **once the mapping is sealed, it will + stay in the process's memory until the process terminates**. + + Example:: + + *ptr = mmap(0, 4096, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + rc = mseal(ptr, 4096, 0); + /* munmap will fail */ + rc = munmap(ptr, 4096); + assert(rc < 0); + + Blocked mm syscall: + - munmap + - mmap + - mremap + - mprotect and pkey_mprotect + - some destructive madvise behaviors: MADV_DONTNEED, MADV_FREE, + MADV_DONTNEED_LOCKED, MADV_FREE, MADV_DONTFORK, MADV_WIPEONFORK + + The first set of syscalls to block is munmap, mremap, mmap. They can + either leave an empty space in the address space, therefore allowing + replacement with a new mapping with new set of attributes, or can + overwrite the existing mapping with another mapping. + + mprotect and pkey_mprotect are blocked because they changes the + protection bits (RWX) of the mapping. + + Certain destructive madvise behaviors, specifically MADV_DONTNEED, + MADV_FREE, MADV_DONTNEED_LOCKED, and MADV_WIPEONFORK, can introduce + risks when applied to anonymous memory by threads lacking write + permissions. Consequently, these operations are prohibited under such + conditions. The aforementioned behaviors have the potential to modify + region contents by discarding pages, effectively performing a memset(0) + operation on the anonymous memory. + + Kernel will return -EPERM for blocked syscalls. + + When blocked syscall return -EPERM due to sealing, the memory regions may + or may not be changed, depends on the syscall being blocked: + + - munmap: munmap is atomic. If one of VMAs in the given range is + sealed, none of VMAs are updated. + - mprotect, pkey_mprotect, madvise: partial update might happen, e.g. + when mprotect over multiple VMAs, mprotect might update the beginning + VMAs before reaching the sealed VMA and return -EPERM. + - mmap and mremap: undefined behavior. + +Use cases +========= - glibc: The dynamic linker, during loading ELF executables, can apply sealing to - non-writable memory segments. - -- Chrome browser: protect some security sensitive data-structures. + mapping segments. -Notes on which memory to seal: -============================== +- Chrome browser: protect some security sensitive data structures. -It might be important to note that sealing changes the lifetime of a mapping, -i.e. the sealed mapping won’t be unmapped till the process terminates or the -exec system call is invoked. Applications can apply sealing to any virtual -memory region from userspace, but it is crucial to thoroughly analyze the -mapping's lifetime prior to apply the sealing. 
+When not to use mseal +===================== +Applications can apply sealing to any virtual memory region from userspace, +but it is *crucial to thoroughly analyze the mapping's lifetime* prior to +apply the sealing. This is because the sealed mapping *won’t be unmapped* +until the process terminates or the exec system call is invoked. For example: + - aio/shm + aio/shm can call mmap and munmap on behalf of userspace, e.g. + ksys_shmdt() in shm.c. The lifetimes of those mapping are not tied to + the lifetime of the process. If those memories are sealed from userspace, + then munmap will fail, causing leaks in VMA address space during the + lifetime of the process. + + - ptr allocated by malloc (heap) + Don't use mseal on the memory ptr return from malloc(). + malloc() is implemented by allocator, e.g. by glibc. Heap manager might + allocate a ptr from brk or mapping created by mmap. + If an app calls mseal on a ptr returned from malloc(), this can affect + the heap manager's ability to manage the mappings; the outcome is + non-deterministic. + + Example:: + + ptr = malloc(size); + /* don't call mseal on ptr return from malloc. */ + mseal(ptr, size); + /* free will success, allocator can't shrink heap lower than ptr */ + free(ptr); + +mseal doesn't block +=================== +In a nutshell, mseal blocks certain mm syscall from modifying some of VMA's +attributes, such as protection bits (RWX). Sealed mappings doesn't mean the +memory is immutable. -- aio/shm - - aio/shm can call mmap()/munmap() on behalf of userspace, e.g. ksys_shmdt() in - shm.c. The lifetime of those mapping are not tied to the lifetime of the - process. If those memories are sealed from userspace, then munmap() will fail, - causing leaks in VMA address space during the lifetime of the process. - -- Brk (heap) - - Currently, userspace applications can seal parts of the heap by calling - malloc() and mseal(). - let's assume following calls from user space: - - - ptr = malloc(size); - - mprotect(ptr, size, RO); - - mseal(ptr, size); - - free(ptr); - - Technically, before mseal() is added, the user can change the protection of - the heap by calling mprotect(RO). As long as the user changes the protection - back to RW before free(), the memory range can be reused. - - Adding mseal() into the picture, however, the heap is then sealed partially, - the user can still free it, but the memory remains to be RO. If the address - is re-used by the heap manager for another malloc, the process might crash - soon after. Therefore, it is important not to apply sealing to any memory - that might get recycled. - - Furthermore, even if the application never calls the free() for the ptr, - the heap manager may invoke the brk system call to shrink the size of the - heap. In the kernel, the brk-shrink will call munmap(). Consequently, - depending on the location of the ptr, the outcome of brk-shrink is - nondeterministic. - - -Additional notes: -================= As Jann Horn pointed out in [3], there are still a few ways to write -to RO memory, which is, in a way, by design. Those cases are not covered -by mseal(). If applications want to block such cases, sandbox tools (such as -seccomp, LSM, etc) might be considered. +to RO memory, which is, in a way, by design. And those could be blocked +by different security measures. Those cases are: -- Write to read-only memory through /proc/self/mem interface. -- Write to read-only memory through ptrace (such as PTRACE_POKETEXT). -- userfaultfd. 
+ - Write to read-only memory through /proc/self/mem interface (FOLL_FORCE). + - Write to read-only memory through ptrace (such as PTRACE_POKETEXT). + - userfaultfd. The idea that inspired this patch comes from Stephen Röttger’s work in V8 CFI [4]. Chrome browser in ChromeOS will be the first user of this API. -Reference: -========== -[1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274 - -[2] https://man.openbsd.org/mimmutable.2 - -[3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com - -[4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc +Reference +========= +- [1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274 +- [2] https://man.openbsd.org/mimmutable.2 +- [3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com +- [4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc -- 2.51.0 From 01626a18230246efdcea322aa8f067e60ffe5ccd Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 27 Sep 2024 09:19:36 +1200 Subject: [PATCH 05/16] mm: avoid unconditional one-tick sleep when swapcache_prepare fails Commit 13ddaf26be32 ("mm/swap: fix race when skipping swapcache") introduced an unconditional one-tick sleep when `swapcache_prepare()` fails, which has led to reports of UI stuttering on latency-sensitive Android devices. To address this, we can use a waitqueue to wake up tasks that fail `swapcache_prepare()` sooner, instead of always sleeping for a full tick. While tasks may occasionally be woken by an unrelated `do_swap_page()`, this method is preferable to two scenarios: rapid re-entry into page faults, which can cause livelocks, and multiple millisecond sleeps, which visibly degrade user experience. Oven's testing shows that a single waitqueue resolves the UI stuttering issue. If a 'thundering herd' problem becomes apparent later, a waitqueue hash similar to `folio_wait_table[PAGE_WAIT_TABLE_SIZE]` for page bit locks can be introduced. [v-songbaohua@oppo.com: wake_up only when swapcache_wq waitqueue is active] Link: https://lkml.kernel.org/r/20241008130807.40833-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240926211936.75373-1-21cnbao@gmail.com Fixes: 13ddaf26be32 ("mm/swap: fix race when skipping swapcache") Signed-off-by: Barry Song Reported-by: Oven Liyang Tested-by: Oven Liyang Cc: Kairui Song Cc: "Huang, Ying" Cc: Yu Zhao Cc: David Hildenbrand Cc: Chris Li Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Minchan Kim Cc: Yosry Ahmed Cc: SeongJae Park Cc: Kalesh Singh Cc: Suren Baghdasaryan Cc: Signed-off-by: Andrew Morton --- mm/memory.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 3ccee51adfbb..bdf77a3ec47b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4187,6 +4187,8 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq); + /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. 
@@ -4199,6 +4201,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *swapcache, *folio = NULL; + DECLARE_WAITQUEUE(wait, current); struct page *page; struct swap_info_struct *si = NULL; rmap_t rmap_flags = RMAP_NONE; @@ -4297,7 +4300,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * Relax a bit to prevent rapid * repeated page faults. */ + add_wait_queue(&swapcache_wq, &wait); schedule_timeout_uninterruptible(1); + remove_wait_queue(&swapcache_wq, &wait); goto out_page; } need_clear_cache = true; @@ -4604,8 +4609,11 @@ unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); out: /* Clear the swap cache pin for direct swapin after PTL unlock */ - if (need_clear_cache) + if (need_clear_cache) { swapcache_clear(si, entry, nr_pages); + if (waitqueue_active(&swapcache_wq)) + wake_up(&swapcache_wq); + } if (si) put_swap_device(si); return ret; @@ -4620,8 +4628,11 @@ out_release: folio_unlock(swapcache); folio_put(swapcache); } - if (need_clear_cache) + if (need_clear_cache) { swapcache_clear(si, entry, nr_pages); + if (waitqueue_active(&swapcache_wq)) + wake_up(&swapcache_wq); + } if (si) put_swap_device(si); return ret; -- 2.51.0 From aa6f8b2593b56a02043684182a89853f919dff3e Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Fri, 18 Oct 2024 15:34:11 -0700 Subject: [PATCH 06/16] mm/gup: stop leaking pinned pages in low memory conditions If a driver tries to call any of the pin_user_pages*(FOLL_LONGTERM) family of functions, and requests "too many" pages, then the call will erroneously leave pages pinned. This is visible in user space as an actual memory leak. Repro is trivial: just make enough pin_user_pages(FOLL_LONGTERM) calls to exhaust memory. The root cause of the problem is this sequence, within __gup_longterm_locked(): __get_user_pages_locked() rc = check_and_migrate_movable_pages() ...which gets retried in a loop. The loop error handling is incomplete, clearly due to a somewhat unusual and complicated tri-state error API. But anyway, if -ENOMEM, or in fact, any unexpected error is returned from check_and_migrate_movable_pages(), then __gup_longterm_locked() happily returns the error, while leaving the pages pinned. In the failed case, which is an app that requests (via a device driver) 30720000000 bytes to be pinned, and then exits, I see this: $ grep foll /proc/vmstat nr_foll_pin_acquired 7502048 nr_foll_pin_released 2048 And after applying this patch, it returns to balanced pins: $ grep foll /proc/vmstat nr_foll_pin_acquired 7502048 nr_foll_pin_released 7502048 Note that the child routine, check_and_migrate_movable_folios(), avoids this problem, by unpinning any folios in the **folios argument, before returning an error. Fix this by making check_and_migrate_movable_pages() behave in exactly the same way as check_and_migrate_movable_folios(): unpin all pages in **pages, before returning an error. Also, documentation was an aggravating factor, so: 1) Consolidate the documentation for these two routines, now that they have identical external behavior. 2) Rewrite the consolidated documentation: a) Clearly list the three return code cases, and what happens in each case. b) Mention that one of the cases unpins the pages or folios, before returning an error code. 
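To illustrate the intended calling convention, a caller of
check_and_migrate_movable_pages() is expected to behave roughly like
this (an illustrative sketch only; pin_pages() is a hypothetical
stand-in for the real pinning step in __gup_longterm_locked()):

	long rc;

	do {
		rc = pin_pages(pages, nr_pages);	/* FOLL_PIN | FOLL_LONGTERM */
		if (rc <= 0)
			break;
		rc = check_and_migrate_movable_pages(rc, pages);
		/*
		 * 0:       all pages are pinned and safe for long-term use
		 * -EAGAIN: pages were migrated and unpinned; pin and retry
		 * other:   failure; the pages have already been unpinned,
		 *          so just propagate rc
		 */
	} while (rc == -EAGAIN);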
Link: https://lkml.kernel.org/r/20241018223411.310331-1-jhubbard@nvidia.com Fixes: 24a95998e9ba ("mm/gup.c: simplify and fix check_and_migrate_movable_pages() return codes") Signed-off-by: John Hubbard Reviewed-by: Alistair Popple Suggested-by: David Hildenbrand Cc: Shigeru Yoshida Cc: Jason Gunthorpe Cc: Minchan Kim Cc: Pasha Tatashin Signed-off-by: Andrew Morton --- mm/gup.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index a82890b46a36..4637dab7b54f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2394,20 +2394,25 @@ err: } /* - * Check whether all folios are *allowed* to be pinned indefinitely (longterm). + * Check whether all folios are *allowed* to be pinned indefinitely (long term). * Rather confusingly, all folios in the range are required to be pinned via * FOLL_PIN, before calling this routine. * - * If any folios in the range are not allowed to be pinned, then this routine - * will migrate those folios away, unpin all the folios in the range and return - * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then - * call this routine again. + * Return values: * - * If an error other than -EAGAIN occurs, this indicates a migration failure. - * The caller should give up, and propagate the error back up the call stack. - * - * If everything is OK and all folios in the range are allowed to be pinned, + * 0: if everything is OK and all folios in the range are allowed to be pinned, * then this routine leaves all folios pinned and returns zero for success. + * + * -EAGAIN: if any folios in the range are not allowed to be pinned, then this + * routine will migrate those folios away, unpin all the folios in the range. If + * migration of the entire set of folios succeeds, then -EAGAIN is returned. The + * caller should re-pin the entire range with FOLL_PIN and then call this + * routine again. + * + * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this + * indicates a migration failure. The caller should give up, and propagate the + * error back up the call stack. The caller does not need to unpin any folios in + * that case, because this routine will do the unpinning. */ static long check_and_migrate_movable_folios(unsigned long nr_folios, struct folio **folios) @@ -2425,10 +2430,8 @@ static long check_and_migrate_movable_folios(unsigned long nr_folios, } /* - * This routine just converts all the pages in the @pages array to folios and - * calls check_and_migrate_movable_folios() to do the heavy lifting. - * - * Please see the check_and_migrate_movable_folios() documentation for details. + * Return values and behavior are the same as those for + * check_and_migrate_movable_folios(). */ static long check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages) @@ -2437,8 +2440,10 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages, long i, ret; folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL); - if (!folios) + if (!folios) { + unpin_user_pages(pages, nr_pages); return -ENOMEM; + } for (i = 0; i < nr_pages; i++) folios[i] = page_folio(pages[i]); -- 2.51.0 From f4657e16e767105194f97586fe3c03d3f64c4d37 Mon Sep 17 00:00:00 2001 From: Hao Ge Date: Sun, 20 Oct 2024 15:08:19 +0800 Subject: [PATCH 07/16] mm/codetag: fix null pointer check logic for ref and tag When we compile and load lib/slub_kunit.c,it will cause a panic. 
The root cause is that __kmalloc_cache_noprof was directly called instead of kmem_cache_alloc,which resulted in no alloc_tag being allocated.This caused current->alloc_tag to be null,leading to a null pointer dereference in alloc_tag_ref_set. Despite the fact that my colleague Pei Xiao will later fix the code in slub_kunit.c,we still need fix null pointer check logic for ref and tag to avoid panic caused by a null pointer dereference. Here is the log for the panic: [ 74.779373][ T2158] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020 [ 74.780130][ T2158] Mem abort info: [ 74.780406][ T2158] ESR = 0x0000000096000004 [ 74.780756][ T2158] EC = 0x25: DABT (current EL), IL = 32 bits [ 74.781225][ T2158] SET = 0, FnV = 0 [ 74.781529][ T2158] EA = 0, S1PTW = 0 [ 74.781836][ T2158] FSC = 0x04: level 0 translation fault [ 74.782288][ T2158] Data abort info: [ 74.782577][ T2158] ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 [ 74.783068][ T2158] CM = 0, WnR = 0, TnD = 0, TagAccess = 0 [ 74.783533][ T2158] GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 [ 74.784010][ T2158] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000105f34000 [ 74.784586][ T2158] [0000000000000020] pgd=0000000000000000, p4d=0000000000000000 [ 74.785293][ T2158] Internal error: Oops: 0000000096000004 [#1] SMP [ 74.785805][ T2158] Modules linked in: slub_kunit kunit ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 ipt_REJECT nf_reject_ipv4 xt_conntrack ebtable_nat ebtable_broute ip6table_nat ip6table_mangle 4 [ 74.790661][ T2158] CPU: 0 UID: 0 PID: 2158 Comm: kunit_try_catch Kdump: loaded Tainted: G W N 6.12.0-rc3+ #2 [ 74.791535][ T2158] Tainted: [W]=WARN, [N]=TEST [ 74.791889][ T2158] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 [ 74.792479][ T2158] pstate: 40400005 (nZcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 74.793101][ T2158] pc : alloc_tagging_slab_alloc_hook+0x120/0x270 [ 74.793607][ T2158] lr : alloc_tagging_slab_alloc_hook+0x120/0x270 [ 74.794095][ T2158] sp : ffff800084d33cd0 [ 74.794418][ T2158] x29: ffff800084d33cd0 x28: 0000000000000000 x27: 0000000000000000 [ 74.795095][ T2158] x26: 0000000000000000 x25: 0000000000000012 x24: ffff80007b30e314 [ 74.795822][ T2158] x23: ffff000390ff6f10 x22: 0000000000000000 x21: 0000000000000088 [ 74.796555][ T2158] x20: ffff000390285840 x19: fffffd7fc3ef7830 x18: ffffffffffffffff [ 74.797283][ T2158] x17: ffff8000800e63b4 x16: ffff80007b33afc4 x15: ffff800081654c00 [ 74.798011][ T2158] x14: 0000000000000000 x13: 205d383531325420 x12: 5b5d383734363537 [ 74.798744][ T2158] x11: ffff800084d337e0 x10: 000000000000005d x9 : 00000000ffffffd0 [ 74.799476][ T2158] x8 : 7f7f7f7f7f7f7f7f x7 : ffff80008219d188 x6 : c0000000ffff7fff [ 74.800206][ T2158] x5 : ffff0003fdbc9208 x4 : ffff800081edd188 x3 : 0000000000000001 [ 74.800932][ T2158] x2 : 0beaa6dee1ac5a00 x1 : 0beaa6dee1ac5a00 x0 : ffff80037c2cb000 [ 74.801656][ T2158] Call trace: [ 74.801954][ T2158] alloc_tagging_slab_alloc_hook+0x120/0x270 [ 74.802494][ T2158] __kmalloc_cache_noprof+0x148/0x33c [ 74.802976][ T2158] test_kmalloc_redzone_access+0x4c/0x104 [slub_kunit] [ 74.803607][ T2158] kunit_try_run_case+0x70/0x17c [kunit] [ 74.804124][ T2158] kunit_generic_run_threadfn_adapter+0x2c/0x4c [kunit] [ 74.804768][ T2158] kthread+0x10c/0x118 [ 74.805141][ T2158] ret_from_fork+0x10/0x20 [ 74.805540][ T2158] Code: b9400a80 11000400 b9000a80 97ffd858 (f94012d3) [ 74.806176][ T2158] SMP: stopping secondary CPUs [ 74.808130][ T2158] Starting crashdump kernel... 
Link: https://lkml.kernel.org/r/20241020070819.307944-1-hao.ge@linux.dev Fixes: e0a955bf7f61 ("mm/codetag: add pgalloc_tag_copy()") Signed-off-by: Hao Ge Acked-by: Suren Baghdasaryan Suggested-by: Suren Baghdasaryan Acked-by: Yu Zhao Cc: Kent Overstreet Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 1f0a9ff23a2c..941deffc590d 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -135,18 +135,21 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {} #endif /* Caller should verify both ref and tag to be valid */ -static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) { alloc_tag_add_check(ref, tag); if (!ref || !tag) - return; + return false; ref->ct = &tag->ct; + return true; } -static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) { - __alloc_tag_ref_set(ref, tag); + if (unlikely(!__alloc_tag_ref_set(ref, tag))) + return false; + /* * We need in increment the call counter every time we have a new * allocation or when we split a large allocation into smaller ones. @@ -154,12 +157,13 @@ static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *t * counter because when we free each part the counter will be decremented. */ this_cpu_inc(tag->counters->calls); + return true; } static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) { - alloc_tag_ref_set(ref, tag); - this_cpu_add(tag->counters->bytes, bytes); + if (likely(alloc_tag_ref_set(ref, tag))) + this_cpu_add(tag->counters->bytes, bytes); } static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) -- 2.51.0 From e0fc203748377835bbb4fb4c45174592214a3211 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Wed, 23 Oct 2024 13:12:36 -0400 Subject: [PATCH 08/16] mm: avoid VM_BUG_ON when try to map an anon large folio to zero page. An anonymous large folio can be split into non order-0 folios, try_to_map_unused_to_zeropage() should not VM_BUG_ON compound pages but just return false. This fixes the crash when splitting anonymous large folios to non order-0 folios. 
Link: https://lkml.kernel.org/r/20241023171236.1122535-1-ziy@nvidia.com Fixes: b1f202060afe ("mm: remap unused subpages to shared zeropage when splitting isolated thp") Signed-off-by: Zi Yan Acked-by: David Hildenbrand Acked-by: Usama Arif Cc: Barry Song Cc: Domenico Cerasuolo Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Matthew Wilcox Cc: Mike Rapoport (Microsoft) Cc: Nico Pache Cc: Rik van Riel Cc: Roman Gushchin Cc: Ryan Roberts Cc: Shakeel Butt Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/migrate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/migrate.c b/mm/migrate.c index df91248755e4..7e520562d421 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -206,7 +206,8 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, pte_t newpte; void *addr; - VM_BUG_ON_PAGE(PageCompound(page), page); + if (PageCompound(page)) + return false; VM_BUG_ON_PAGE(!PageAnon(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page); -- 2.51.0 From b54e1bfecc4b2775c184d2edb319232b853a686d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 24 Oct 2024 10:02:01 +1300 Subject: [PATCH 09/16] mm: fix PSWPIN counter for large folios swap-in Similar to PSWPOUT, we should count the number of base pages instead of large folios. Link: https://lkml.kernel.org/r/20241023210201.2798-1-21cnbao@gmail.com Fixes: 242d12c98174 ("mm: support large folios swap-in for sync io devices") Signed-off-by: Barry Song Acked-by: David Hildenbrand Reviewed-by: Baolin Wang Cc: Chris Li Cc: Yosry Ahmed Cc: "Huang, Ying" Cc: Kairui Song Cc: Ryan Roberts Cc: Kanchana P Sridhar Cc: Usama Arif Signed-off-by: Andrew Morton --- mm/page_io.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/page_io.c b/mm/page_io.c index 78bc88acee79..69536a2b3c13 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -570,7 +570,7 @@ static void swap_read_folio_bdev_sync(struct folio *folio, * attempt to access it in the page fault retry time check. */ get_task_struct(current); - count_vm_event(PSWPIN); + count_vm_events(PSWPIN, folio_nr_pages(folio)); submit_bio_wait(&bio); __end_swap_bio_read(&bio); put_task_struct(current); @@ -585,7 +585,7 @@ static void swap_read_folio_bdev_async(struct folio *folio, bio->bi_iter.bi_sector = swap_folio_sector(folio); bio->bi_end_io = end_swap_bio_read; bio_add_folio_nofail(bio, folio, folio_size(folio), 0); - count_vm_event(PSWPIN); + count_vm_events(PSWPIN, folio_nr_pages(folio)); submit_bio(bio); } -- 2.51.0 From 5168a68eb78fa1c67a8b2d31d0642c7fd866cc12 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Wed, 23 Oct 2024 01:55:12 +0800 Subject: [PATCH 10/16] mm, swap: avoid over reclaim of full clusters When running low on usable slots, cluster allocator will try to reclaim the full clusters aggressively to reclaim HAS_CACHE slots. This guarantees that as long as there are any usable slots, HAS_CACHE or not, the swap device will be usable and workload won't go OOM early. Before the cluster allocator, swap allocator fails easily if device is filled up with reclaimable HAS_CACHE slots. 
Which can be easily reproduced with the following simple program:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	#define SIZE 8192UL * 1024UL * 1024UL
	int main(int argc, char **argv) {
		long tmp;
		char *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		memset(p, 0, SIZE);
		madvise(p, SIZE, MADV_PAGEOUT);
		for (unsigned long i = 0; i < SIZE; ++i)
			tmp += p[i];
		getchar(); /* Pause */
		return 0;
	}

Set up an 8G non-ramdisk swap: the first run of the program will swap
out 8G of RAM successfully. But if the same program is run again while
the first run is paused, the second run can't swap out all 8G of memory,
as half of the swap device is now pinned by HAS_CACHE. There was a
random scan in the old allocator that might reclaim part of the
HAS_CACHE slots by luck, but it was unreliable.

The new allocator added reclaim of full clusters when the device is low
on usable slots. But when multiple CPUs see that the device is low on
usable slots at the same time, they run into a thundering herd problem.
This is an observable problem on large machines with massively parallel
workloads, as full cluster reclaim is slower on a large swap device and
a higher number of CPUs makes things worse.

Testing used a 128G ZRAM on a 48c96t system. When the swap device is
very close to full (e.g. 124G / 128G), building a Linux kernel with
make -j96 in a 1G memory cgroup will hang (not a softlockup, though),
spinning in full cluster reclaim for about ~5 min before going OOM.

To solve this, split the full reclaim into two parts:

- Instead of doing a synchronous aggressive reclaim when the device is
  low, do only one aggressive reclaim, via a kworker, when the device is
  strictly full. This still ensures that, in the worst case, the device
  won't be unusable because of HAS_CACHE slots.

- To avoid allocations (especially higher order) suffering from
  HAS_CACHE filling up clusters while the kworker is not responsive
  enough, do one synchronous scan every time the free list is drained,
  and only scan one cluster. This is similar to the random reclaim
  before; it keeps the full clusters rotated and has minimal latency.

This should provide a fair reclaim strategy suitable for most workloads.

Link: https://lkml.kernel.org/r/20241022175512.10398-1-ryncsn@gmail.com
Fixes: 2cacbdfdee65 ("mm: swap: add a adaptive full cluster cache reclaim")
Signed-off-by: Kairui Song
Cc: Barry Song
Cc: Chris Li
Cc: "Huang, Ying"
Cc: Hugh Dickins
Cc: Kalesh Singh
Cc: Ryan Roberts
Cc: Yosry Ahmed
Signed-off-by: Andrew Morton
---
 include/linux/swap.h |  1 +
 mm/swapfile.c        | 49 +++++++++++++++++++++++++++-----------------
 2 files changed, 31 insertions(+), 19 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index ca533b478c21..f3e0ac20c2e8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -335,6 +335,7 @@ struct swap_info_struct {
					 * list.
*/ struct work_struct discard_work; /* discard worker */ + struct work_struct reclaim_work; /* reclaim worker */ struct list_head discard_clusters; /* discard clusters list */ struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one diff --git a/mm/swapfile.c b/mm/swapfile.c index b0915f3fab31..46bd4b1a3c07 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -731,15 +731,16 @@ done: return offset; } -static void swap_reclaim_full_clusters(struct swap_info_struct *si) +/* Return true if reclaimed a whole cluster */ +static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) { long to_scan = 1; unsigned long offset, end; struct swap_cluster_info *ci; unsigned char *map = si->swap_map; - int nr_reclaim, total_reclaimed = 0; + int nr_reclaim; - if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER) + if (force) to_scan = si->inuse_pages / SWAPFILE_CLUSTER; while (!list_empty(&si->full_clusters)) { @@ -749,28 +750,36 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si) end = min(si->max, offset + SWAPFILE_CLUSTER); to_scan--; + spin_unlock(&si->lock); while (offset < end) { if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { - spin_unlock(&si->lock); nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); - spin_lock(&si->lock); - if (nr_reclaim > 0) { - offset += nr_reclaim; - total_reclaimed += nr_reclaim; - continue; - } else if (nr_reclaim < 0) { - offset += -nr_reclaim; + if (nr_reclaim) { + offset += abs(nr_reclaim); continue; } } offset++; } - if (to_scan <= 0 || total_reclaimed) + spin_lock(&si->lock); + + if (to_scan <= 0) break; } } +static void swap_reclaim_work(struct work_struct *work) +{ + struct swap_info_struct *si; + + si = container_of(work, struct swap_info_struct, reclaim_work); + + spin_lock(&si->lock); + swap_reclaim_full_clusters(si, true); + spin_unlock(&si->lock); +} + /* * Try to get swap entries with specified order from current cpu's swap entry * pool (a cluster). 
This might involve allocating a new cluster for current CPU @@ -800,6 +809,10 @@ new_cluster: goto done; } + /* Try reclaim from full clusters if free clusters list is drained */ + if (vm_swap_full()) + swap_reclaim_full_clusters(si, false); + if (order < PMD_ORDER) { unsigned int frags = 0; @@ -881,13 +894,6 @@ new_cluster: } done: - /* Try reclaim from full clusters if device is nearfull */ - if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) { - swap_reclaim_full_clusters(si); - if (!found && !order && si->pages != si->inuse_pages) - goto new_cluster; - } - cluster->next[order] = offset; return found; } @@ -922,6 +928,9 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, si->lowest_bit = si->max; si->highest_bit = 0; del_from_avail_list(si); + + if (vm_swap_full()) + schedule_work(&si->reclaim_work); } } @@ -2816,6 +2825,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) wait_for_completion(&p->comp); flush_work(&p->discard_work); + flush_work(&p->reclaim_work); destroy_swap_extents(p); if (p->flags & SWP_CONTINUED) @@ -3376,6 +3386,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) return PTR_ERR(si); INIT_WORK(&si->discard_work, swap_discard_work); + INIT_WORK(&si->reclaim_work, swap_reclaim_work); name = getname(specialfile); if (IS_ERR(name)) { -- 2.51.0 From ece5897e5a10fcd56a317e32f2dc7219f366a5a8 Mon Sep 17 00:00:00 2001 From: Wladislav Wiebe Date: Tue, 22 Oct 2024 19:21:13 +0200 Subject: [PATCH 11/16] tools/mm: -Werror fixes in page-types/slabinfo MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Commit e6d2c436ff693 ("tools/mm: allow users to provide additional cflags/ldflags") passes now CFLAGS to Makefile. With this, build systems with default -Werror enabled found: slabinfo.c:1300:25: error: ignoring return value of 'chdir' declared with attribute 'warn_unused_result' [-Werror=unused-result]                          chdir("..");                          ^~~~~~~~~~~ page-types.c:397:35: error: format '%lu' expects argument of type 'long unsigned int', but argument 2 has type 'uint64_t' {aka 'long long unsigned int'} [-Werror=format=]                          printf("%lu\t", mapcnt0);                                  ~~^     ~~~~~~~ .. Fix page-types by using PRIu64 for uint64_t prints and check in slabinfo for return code on chdir(".."). Link: https://lkml.kernel.org/r/c1ceb507-94bc-461c-934d-c19b77edd825@gmail.com Fixes: e6d2c436ff69 ("tools/mm: allow users to provide additional cflags/ldflags") Signed-off-by: Wladislav Wiebe Cc: Vlastimil Babka Cc: Herton R. 
Krzesinski
Cc:
Signed-off-by: Andrew Morton
---
 tools/mm/page-types.c | 9 +++++----
 tools/mm/slabinfo.c   | 4 +++-
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/tools/mm/page-types.c b/tools/mm/page-types.c
index fa050d5a48cd..6eb17cc1a06c 100644
--- a/tools/mm/page-types.c
+++ b/tools/mm/page-types.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <inttypes.h>
 #include
 #include
 #include
@@ -391,9 +392,9 @@ static void show_page_range(unsigned long voffset, unsigned long offset,
		if (opt_file)
			printf("%lx\t", voff);
		if (opt_list_cgroup)
-			printf("@%llu\t", (unsigned long long)cgroup0);
+			printf("@%" PRIu64 "\t", cgroup0);
		if (opt_list_mapcnt)
-			printf("%lu\t", mapcnt0);
+			printf("%" PRIu64 "\t", mapcnt0);
		printf("%lx\t%lx\t%s\n", index, count, page_flag_name(flags0));
 }

@@ -419,9 +420,9 @@ static void show_page(unsigned long voffset, unsigned long offset,
		if (opt_file)
			printf("%lx\t", voffset);
		if (opt_list_cgroup)
-			printf("@%llu\t", (unsigned long long)cgroup);
+			printf("@%" PRIu64 "\t", cgroup);
		if (opt_list_mapcnt)
-			printf("%lu\t", mapcnt);
+			printf("%" PRIu64 "\t", mapcnt);
		printf("%lx\t%s\n", offset, page_flag_name(flags));
 }

diff --git a/tools/mm/slabinfo.c b/tools/mm/slabinfo.c
index cfaeaea71042..04e9e6ba86ea 100644
--- a/tools/mm/slabinfo.c
+++ b/tools/mm/slabinfo.c
@@ -1297,7 +1297,9 @@ static void read_slab_dir(void)
			slab->cpu_partial_free = get_obj("cpu_partial_free");
			slab->alloc_node_mismatch = get_obj("alloc_node_mismatch");
			slab->deactivate_bypass = get_obj("deactivate_bypass");
-			chdir("..");
+			if (chdir(".."))
+				fatal("Unable to chdir from slab ../%s\n",
+					slab->name);
			if (slab->name[0] == ':')
				alias_targets++;
			slab++;
-- 
2.51.0

From 330d8df81f3673d6fb74550bbc9bb159d81b35f7 Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Tue, 22 Oct 2024 18:07:06 +0200
Subject: [PATCH 12/16] kasan: remove vmalloc_percpu test

Commit 1a2473f0cbc0 ("kasan: improve vmalloc tests") added the
vmalloc_percpu KASAN test with the assumption that __alloc_percpu always
uses vmalloc internally, which is tagged by KASAN.

However, __alloc_percpu might allocate memory from the first per-CPU
chunk, which is not allocated via vmalloc(). As a result, the test might
fail.

Remove the test until proper KASAN annotations for per-CPU allocations
are added; tracked in https://bugzilla.kernel.org/show_bug.cgi?id=215019.

Link: https://lkml.kernel.org/r/20241022160706.38943-1-andrey.konovalov@linux.dev
Fixes: 1a2473f0cbc0 ("kasan: improve vmalloc tests")
Signed-off-by: Andrey Konovalov
Reported-by: Samuel Holland
Link: https://lore.kernel.org/all/4a245fff-cc46-44d1-a5f9-fd2f1c3764ae@sifive.com/
Reported-by: Sabyrzhan Tasbolatov
Link: https://lore.kernel.org/all/CACzwLxiWzNqPBp4C1VkaXZ2wDwvY3yZeetCi1TLGFipKW77drA@mail.gmail.com/
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Dmitry Vyukov
Cc: Marco Elver
Cc: Sabyrzhan Tasbolatov
Cc:
Signed-off-by: Andrew Morton
---
 mm/kasan/kasan_test_c.c | 27 ---------------------------
 1 file changed, 27 deletions(-)

diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index a181e4780d9d..d8fb281e439d 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1810,32 +1810,6 @@ static void vm_map_ram_tags(struct kunit *test)
	free_pages((unsigned long)p_ptr, 1);
 }

-static void vmalloc_percpu(struct kunit *test)
-{
-	char __percpu *ptr;
-	int cpu;
-
-	/*
-	 * This test is specifically crafted for the software tag-based mode,
-	 * the only tag-based mode that poisons percpu mappings.
- */ - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS); - - ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); - - for_each_possible_cpu(cpu) { - char *c_ptr = per_cpu_ptr(ptr, cpu); - - KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN); - KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL); - - /* Make sure that in-bounds accesses don't crash the kernel. */ - *c_ptr = 0; - } - - free_percpu(ptr); -} - /* * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN, * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based @@ -2023,7 +1997,6 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(vmalloc_oob), KUNIT_CASE(vmap_tags), KUNIT_CASE(vm_map_ram_tags), - KUNIT_CASE(vmalloc_percpu), KUNIT_CASE(match_all_not_assigned), KUNIT_CASE(match_all_ptr_tag), KUNIT_CASE(match_all_mem_tag), -- 2.51.0 From d31638ff6c5437ca2968d6c22fb16524fd485013 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Mon, 21 Oct 2024 00:22:00 +0100 Subject: [PATCH 13/16] Squashfs: fix variable overflow in squashfs_readpage_block Syzbot reports a slab out of bounds access in squashfs_readpage_block(). This is caused by an attempt to read page index 0x2000000000. This value (start_index) is stored in an integer loop variable which overflows producing a value of 0. This causes a loop which iterates over pages start_index -> end_index to iterate over 0 -> end_index, which ultimately causes an out of bounds page array access. Fix by changing variable to a loff_t, and rename to index to make it clearer it is a page index, and not a loop count. Link: https://lkml.kernel.org/r/20241020232200.837231-1-phillip@squashfs.org.uk Signed-off-by: Phillip Lougher Reported-by: "Lai, Yi" Closes: https://lore.kernel.org/all/ZwzcnCAosIPqQ9Ie@ly-workstation/ Signed-off-by: Andrew Morton --- fs/squashfs/file_direct.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index 22251743fadf..d19d4db74af8 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -30,7 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; loff_t start_index = folio->index & ~mask; loff_t end_index = start_index | mask; - int i, n, pages, bytes, res = -ENOMEM; + loff_t index; + int i, pages, bytes, res = -ENOMEM; struct page **page, *last_page; struct squashfs_page_actor *actor; void *pageaddr; @@ -45,9 +46,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, return res; /* Try to grab all the pages covered by the Squashfs block */ - for (i = 0, n = start_index; n <= end_index; n++) { - page[i] = (n == folio->index) ? target_page : - grab_cache_page_nowait(target_page->mapping, n); + for (i = 0, index = start_index; index <= end_index; index++) { + page[i] = (index == folio->index) ? target_page : + grab_cache_page_nowait(target_page->mapping, index); if (page[i] == NULL) continue; -- 2.51.0 From b3a033e3ecd3471248d474ef263aadc0059e516a Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 20 Oct 2024 13:51:28 +0900 Subject: [PATCH 14/16] nilfs2: fix potential deadlock with newly created symlinks Syzbot reported that page_symlink(), called by nilfs_symlink(), triggers memory reclamation involving the filesystem layer, which can result in circular lock dependencies among the reader/writer semaphore nilfs->ns_segctor_sem, s_writers percpu_rwsem (intwrite) and the fs_reclaim pseudo lock. 
This is because after commit 21fc61c73c39 ("don't put symlink bodies in pagecache into highmem"), the gfp flags of the page cache for symbolic links are overwritten to GFP_KERNEL via inode_nohighmem(). This is not a problem for symlinks read from the backing device, because the __GFP_FS flag is dropped after inode_nohighmem() is called. However, when a new symlink is created with nilfs_symlink(), the gfp flags remain overwritten to GFP_KERNEL. Then, memory allocation called from page_symlink() etc. triggers memory reclamation including the FS layer, which may call nilfs_evict_inode() or nilfs_dirty_inode(). And these can cause a deadlock if they are called while nilfs->ns_segctor_sem is held: Fix this issue by dropping the __GFP_FS flag from the page cache GFP flags of newly created symlinks in the same way that nilfs_new_inode() and __nilfs_read_inode() do, as a workaround until we adopt nofs allocation scope consistently or improve the locking constraints. Link: https://lkml.kernel.org/r/20241020050003.4308-1-konishi.ryusuke@gmail.com Fixes: 21fc61c73c39 ("don't put symlink bodies in pagecache into highmem") Signed-off-by: Ryusuke Konishi Reported-by: syzbot+9ef37ac20608f4836256@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=9ef37ac20608f4836256 Tested-by: syzbot+9ef37ac20608f4836256@syzkaller.appspotmail.com Cc: Signed-off-by: Andrew Morton --- fs/nilfs2/namei.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 4905063790c5..9b108052d9f7 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -157,6 +157,9 @@ static int nilfs_symlink(struct mnt_idmap *idmap, struct inode *dir, /* slow symlink */ inode->i_op = &nilfs_symlink_inode_operations; inode_nohighmem(inode); + mapping_set_gfp_mask(inode->i_mapping, + mapping_gfp_constraint(inode->i_mapping, + ~__GFP_FS)); inode->i_mapping->a_ops = &nilfs_aops; err = page_symlink(inode, symname, l); if (err) -- 2.51.0 From 9d08ec41a0645283d79a2e642205d488feaceacf Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Sat, 19 Oct 2024 22:22:12 -0600 Subject: [PATCH 15/16] mm: allow set/clear page_type again Some page flags (page->flags) were converted to page types (page->page_types). A recent example is PG_hugetlb. From the exclusive writer's perspective, e.g., a thread doing __folio_set_hugetlb(), there is a difference between the page flag and type APIs: the former allows the same non-atomic operation to be repeated whereas the latter does not. For example, calling __folio_set_hugetlb() twice triggers VM_BUG_ON_FOLIO(), since the second call expects the type (PG_hugetlb) not to be set previously. Using add_hugetlb_folio() as an example, it calls __folio_set_hugetlb() in the following error-handling path. And when that happens, it triggers the aforementioned VM_BUG_ON_FOLIO(). if (folio_test_hugetlb(folio)) { rc = hugetlb_vmemmap_restore_folio(h, folio); if (rc) { spin_lock_irq(&hugetlb_lock); add_hugetlb_folio(h, folio, false); ... It is possible to make hugeTLB comply with the new requirements from the page type API. However, a straightforward fix would be to just allow the same page type to be set or cleared again inside the API, to avoid any changes to its callers. 
Link: https://lkml.kernel.org/r/20241020042212.296781-1-yuzhao@google.com Fixes: d99e3140a4d3 ("mm: turn folio_test_hugetlb into a PageType") Signed-off-by: Yu Zhao Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1b3a76710487..cc839e4365c1 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -975,12 +975,16 @@ static __always_inline bool folio_test_##fname(const struct folio *folio) \ } \ static __always_inline void __folio_set_##fname(struct folio *folio) \ { \ + if (folio_test_##fname(folio)) \ + return; \ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \ folio); \ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \ } \ static __always_inline void __folio_clear_##fname(struct folio *folio) \ { \ + if (folio->page.page_type == UINT_MAX) \ + return; \ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \ folio->page.page_type = UINT_MAX; \ } @@ -993,11 +997,15 @@ static __always_inline int Page##uname(const struct page *page) \ } \ static __always_inline void __SetPage##uname(struct page *page) \ { \ + if (Page##uname(page)) \ + return; \ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \ page->page_type = (unsigned int)PGTY_##lname << 24; \ } \ static __always_inline void __ClearPage##uname(struct page *page) \ { \ + if (page->page_type == UINT_MAX) \ + return; \ VM_BUG_ON_PAGE(!Page##uname(page), page); \ page->page_type = UINT_MAX; \ } -- 2.51.0 From 85d16bceaf5d8112c9ffcfedd2f1bb9d0a1c1578 Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Fri, 25 Oct 2024 21:15:28 +0300 Subject: [PATCH 16/16] mailmap: update Jarkko's email addresses Remove my previous work email, and the new one. The previous was never used in the commit log, so there's no good reason to spare it. Link: https://lkml.kernel.org/r/20241025181530.6151-1-jarkko@kernel.org Signed-off-by: Jarkko Sakkinen Cc: Alex Elder Cc: David S. Miller Cc: Geliang Tang Cc: Jiri Kosina Cc: Kees Cook Cc: Matthieu Baerts (NGI0) Cc: Matt Ranostay Cc: Neeraj Upadhyay Cc: Quentin Monnet Signed-off-by: Andrew Morton --- .mailmap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 9a94c514e32c..5cf2fab5fe83 100644 --- a/.mailmap +++ b/.mailmap @@ -282,7 +282,7 @@ Jan Glauber Jan Kuliga Jarkko Sakkinen Jarkko Sakkinen -Jarkko Sakkinen +Jarkko Sakkinen Jason Gunthorpe Jason Gunthorpe Jason Gunthorpe -- 2.51.0