From: Liam R. Howlett
Date: Tue, 28 Oct 2025 15:52:34 +0000 (-0400)
Subject: mm/userfaultfd: Remove duplicate code in mfill copy default and shmem
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=866d002c83d663478ea2831410e5e7a0e608a481;p=users%2Fjedix%2Flinux-maple.git

mm/userfaultfd: Remove duplicate code in mfill copy default and shmem

Both shmem and anon have the same block of code.  Extract it into a
function.

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 6d8d2e61bda9..fb26b5dc357b 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -155,6 +155,8 @@ extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
 extern long uffd_wp_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long len, bool enable_wp);
 
+inline int uffd_atomic_pte_copy(struct folio *folio, unsigned long src_addr);
+
 /* move_pages */
 void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
 void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
diff --git a/mm/shmem.c b/mm/shmem.c
index fe079e2c8f30..2a51fb8719e8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3244,7 +3244,6 @@ static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
 	struct address_space *mapping = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-	void *page_kaddr;
 	struct folio *folio;
 	int ret;
 	pmd_t *dst_pmd;
@@ -3272,37 +3271,12 @@ static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
 		if (!folio)
 			goto out_unacct_blocks;
 
-		page_kaddr = kmap_local_folio(folio, 0);
-		/*
-		 * The read mmap_lock is held here. Despite the
-		 * mmap_lock being read recursive a deadlock is still
-		 * possible if a writer has taken a lock. For example:
-		 *
-		 * process A thread 1 takes read lock on own mmap_lock
-		 * process A thread 2 calls mmap, blocks taking write lock
-		 * process B thread 1 takes page fault, read lock on own mmap lock
-		 * process B thread 2 calls mmap, blocks taking write lock
-		 * process A thread 1 blocks taking read lock on process B
-		 * process B thread 1 blocks taking read lock on process A
-		 *
-		 * Disable page faults to prevent potential deadlock
-		 * and retry the copy outside the mmap_lock.
-		 */
-		pagefault_disable();
-		ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
-				     PAGE_SIZE);
-		pagefault_enable();
-		kunmap_local(page_kaddr);
-
-		/* fallback to copy_from_user outside mmap_lock */
-		if (unlikely(ret)) {
+		ret = uffd_atomic_pte_copy(folio, src_addr);
+		if (ret) {
 			*foliop = folio;
-			ret = -ENOENT;
-			/* don't free the page */
 			goto out_unacct_blocks;
 		}
 
-		flush_dcache_folio(folio);
 	} else {
 		folio = *foliop;
 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 3d4c92fcba60..044334323400 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -234,12 +234,46 @@ out:
 	return ret;
 }
 
+inline int uffd_atomic_pte_copy(struct folio *folio, unsigned long src_addr)
+{
+	int ret;
+	void *kaddr;
+
+	kaddr = kmap_local_folio(folio, 0);
+	/*
+	 * The read mmap_lock is held here. Despite the
+	 * mmap_lock being read recursive a deadlock is still
+	 * possible if a writer has taken a lock. For example:
+	 *
+	 * process A thread 1 takes read lock on own mmap_lock
+	 * process A thread 2 calls mmap, blocks taking write lock
+	 * process B thread 1 takes page fault, read lock on own mmap lock
+	 * process B thread 2 calls mmap, blocks taking write lock
+	 * process A thread 1 blocks taking read lock on process B
+	 * process B thread 1 blocks taking read lock on process A
+	 *
+	 * Disable page faults to prevent potential deadlock
+	 * and retry the copy outside the mmap_lock.
+	 */
+	pagefault_disable();
+	ret = copy_from_user(kaddr, (const void __user *) src_addr,
+			     PAGE_SIZE);
+	pagefault_enable();
+	kunmap_local(kaddr);
+
+	/* fallback to copy_from_user outside mmap_lock */
+	if (unlikely(ret))
+		return -ENOENT; /* don't free the page */
+
+	flush_dcache_folio(folio);
+	return 0;
+}
+
 int mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
 			  unsigned long dst_addr, unsigned long src_addr,
 			  uffd_flags_t flags, struct folio **foliop,
 			  unsigned long increment)
 {
-	void *kaddr;
 	int ret;
 	struct folio *folio;
 	pmd_t *dst_pmd;
@@ -255,37 +289,12 @@ int mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
 		if (!folio)
 			goto out;
 
-		kaddr = kmap_local_folio(folio, 0);
-		/*
-		 * The read mmap_lock is held here. Despite the
-		 * mmap_lock being read recursive a deadlock is still
-		 * possible if a writer has taken a lock. For example:
-		 *
-		 * process A thread 1 takes read lock on own mmap_lock
-		 * process A thread 2 calls mmap, blocks taking write lock
-		 * process B thread 1 takes page fault, read lock on own mmap lock
-		 * process B thread 2 calls mmap, blocks taking write lock
-		 * process A thread 1 blocks taking read lock on process B
-		 * process B thread 1 blocks taking read lock on process A
-		 *
-		 * Disable page faults to prevent potential deadlock
-		 * and retry the copy outside the mmap_lock.
-		 */
-		pagefault_disable();
-		ret = copy_from_user(kaddr, (const void __user *) src_addr,
-				     PAGE_SIZE);
-		pagefault_enable();
-		kunmap_local(kaddr);
-
-		/* fallback to copy_from_user outside mmap_lock */
-		if (unlikely(ret)) {
-			ret = -ENOENT;
+		ret = uffd_atomic_pte_copy(folio, src_addr);
+		if (ret) {
 			*foliop = folio;
-			/* don't free the page */
 			goto out;
 		}
 
-		flush_dcache_folio(folio);
 	} else {
 		folio = *foliop;
 		*foliop = NULL;
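
For context, the -ENOENT that uffd_atomic_pte_copy() returns (while handing
the folio back through *foliop) is consumed by the mfill retry loop, which
redoes the copy outside the mmap_lock as the comment above describes.
Roughly, and with the locking details elided, the caller-side fallback looks
like the following sketch; it is paraphrased from the retry path in
mm/userfaultfd.c and is not part of this diff:

	/*
	 * Sketch (paraphrased, not part of this patch): on -ENOENT the
	 * folio was handed back via *foliop, so the caller drops the
	 * mmap_lock, redoes the copy with page faults enabled, and then
	 * retries the whole operation with the now-populated folio.
	 */
	if (unlikely(err == -ENOENT)) {
		void *kaddr;

		up_read(&dst_mm->mmap_lock);

		kaddr = kmap_local_folio(folio, 0);
		err = copy_from_user(kaddr,
				     (const void __user *)src_addr,
				     PAGE_SIZE);
		kunmap_local(kaddr);
		if (unlikely(err)) {
			err = -EFAULT;
			goto out;
		}
		flush_dcache_folio(folio);
		goto retry;	/* take the mmap_lock again and retry */
	}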