From: Liam R. Howlett
Date: Wed, 29 Oct 2025 07:50:52 +0000 (-0400)
Subject: mm/userfaultfd: Move mfill_atomic_pte_continue() to mm/shmem.c
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=947ed0cf3f93038a27e72ba45f1a07e90646aee8;p=users%2Fjedix%2Flinux-maple.git

mm/userfaultfd: Move mfill_atomic_pte_continue() to mm/shmem.c

continue is only valid for shmem, so move it there to better modularize
the memory types.

Signed-off-by: Liam R. Howlett
---

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 89f1ace2dd18..ef93429dd13e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -196,19 +196,6 @@ static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
 extern bool shmem_charge(struct inode *inode, long pages);
 extern void shmem_uncharge(struct inode *inode, long pages);
 
-#ifdef CONFIG_USERFAULTFD
-#ifdef CONFIG_SHMEM
-extern int shmem_mfill_atomic_pte(struct vm_area_struct *dst_vma,
-				  unsigned long dst_addr,
-				  unsigned long src_addr,
-				  uffd_flags_t flags,
-				  struct folio **foliop);
-#else /* !CONFIG_SHMEM */
-#define shmem_mfill_atomic_pte(dst_vma, dst_addr, \
-			       src_addr, flags, foliop) ({ BUG(); 0; })
-#endif /* CONFIG_SHMEM */
-#endif /* CONFIG_USERFAULTFD */
-
 /*
  * Used space is stored as unsigned 64-bit value in bytes but
  * quota core supports only signed 64-bit values so use that
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 311ce37b9159..114f9257d240 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -256,7 +256,6 @@ static inline unsigned long mfill_size(struct vm_area_struct *vma)
 	return PAGE_SIZE;
 }
 int mfill_atomic_pte_poison(struct uffd_info *info);
-int mfill_atomic_pte_continue(struct uffd_info *info);
 int mfill_atomic_pte_copy(struct uffd_info *info);
 int mfill_atomic_pte_zeropage(struct uffd_info *info);
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 0832b910c6cf..165b428a661e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3235,6 +3235,53 @@ static int shmem_mfill_atomic_pte_zeropage(struct uffd_info *u_info)
 	return ret;
 }
 
+/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
+static int shmem_mfill_atomic_pte_continue(struct uffd_info *info)
+{
+	struct vm_area_struct *dst_vma = info->dst_vma;
+	struct inode *inode = file_inode(dst_vma->vm_file);
+	pgoff_t pgoff = linear_page_index(dst_vma, info->dst_addr);
+	pmd_t *dst_pmd;
+	struct folio *folio;
+	struct page *page;
+	int ret;
+
+	ret = uffd_get_dst_pmd(dst_vma, info->dst_addr, &dst_pmd);
+	if (ret)
+		return ret;
+
+	ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
+	/* Our caller expects us to return -EFAULT if we failed to find folio */
+	if (ret == -ENOENT)
+		ret = -EFAULT;
+	if (ret)
+		goto out;
+	if (!folio) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	page = folio_file_page(folio, pgoff);
+	if (PageHWPoison(page)) {
+		ret = -EIO;
+		goto out_release;
+	}
+
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, info->dst_addr,
+				       page, false, info->wp);
+	if (ret)
+		goto out_release;
+
+	folio_unlock(folio);
+	ret = 0;
+out:
+	return ret;
+out_release:
+	folio_unlock(folio);
+	folio_put(folio);
+	goto out;
+}
+
 static int shmem_mfill_atomic_pte_copy(struct uffd_info *u_info)
 {
 	struct vm_area_struct *dst_vma = u_info->dst_vma;
@@ -5224,7 +5271,7 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 static const struct vm_uffd_ops shmem_uffd_ops = {
 	.copy = shmem_mfill_atomic_pte_copy,
 	.zeropage = shmem_mfill_atomic_pte_zeropage,
-	.cont = mfill_atomic_pte_continue,
+	.cont = shmem_mfill_atomic_pte_continue,
 	.poison = mfill_atomic_pte_poison,
 	.is_dst_valid = shmem_is_dst_valid,
 	.increment = mfill_size,
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1ec0a862bbb3..7fd74ae2b809 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -14,7 +14,6 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/mmu_notifier.h>
 #include <linux/hugetlb.h>
-#include <linux/shmem_fs.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 #include "internal.h"
@@ -396,53 +395,6 @@ out:
 	return ret;
 }
 
-/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-int mfill_atomic_pte_continue(struct uffd_info *info)
-{
-	struct vm_area_struct *dst_vma = info->dst_vma;
-	struct inode *inode = file_inode(dst_vma->vm_file);
-	pgoff_t pgoff = linear_page_index(dst_vma, info->dst_addr);
-	pmd_t *dst_pmd;
-	struct folio *folio;
-	struct page *page;
-	int ret;
-
-	ret = uffd_get_dst_pmd(dst_vma, info->dst_addr, &dst_pmd);
-	if (ret)
-		return ret;
-
-	ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
-	/* Our caller expects us to return -EFAULT if we failed to find folio */
-	if (ret == -ENOENT)
-		ret = -EFAULT;
-	if (ret)
-		goto out;
-	if (!folio) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	page = folio_file_page(folio, pgoff);
-	if (PageHWPoison(page)) {
-		ret = -EIO;
-		goto out_release;
-	}
-
-	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, info->dst_addr,
-				       page, false, info->wp);
-	if (ret)
-		goto out_release;
-
-	folio_unlock(folio);
-	ret = 0;
-out:
-	return ret;
-out_release:
-	folio_unlock(folio);
-	folio_put(folio);
-	goto out;
-}
-
 /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
 int mfill_atomic_pte_poison(struct uffd_info *info)
 {