From: Liam R. Howlett Date: Fri, 24 Oct 2025 03:32:05 +0000 (-0400) Subject: mm/userfaultfd: Use uffd_ops continue operations X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c0c38b9d60752a5398c31a1d9d46866a6f0fef23;p=users%2Fjedix%2Flinux-maple.git mm/userfaultfd: Use uffd_ops continue operations Add the uffd_ops continue operation for all memory types. Start using the uffd_ops cont operation in mfill_atomic(). This further unites hugetlb with other types. Signed-off-by: Liam R. Howlett --- diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index fee5a904fbd0..54fc7a589b11 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -98,6 +98,8 @@ ssize_t uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, unsigned long src_addr); struct vm_uffd_ops { + int (*cont)(struct vm_area_struct *dst_vma, unsigned long dst_addr, + uffd_flags_t flags, unsigned long increment); int (*poison)(struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long increment); @@ -233,6 +235,10 @@ static inline unsigned long mfill_size(struct vm_area_struct *vma) int mfill_atomic_pte_poison(struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long increment); +int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma, + unsigned long dst_addr, uffd_flags_t flags, + unsigned long increment); + static inline bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags, bool wp_async) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 78e9affb82c6..444ab261e03d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5532,7 +5532,12 @@ static ssize_t hugetlb_failed_do_unlock(struct userfaultfd_ctx *ctx, static int hugetlb_mfill_pte_poison(struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long increment); +static int hugetlb_mfill_pte_continue(struct vm_area_struct *dst_vma, + unsigned long dst_addr, uffd_flags_t flags, + unsigned long increment); + static const struct vm_uffd_ops hugetlb_uffd_ops = 
{ + .cont = hugetlb_mfill_pte_continue, .poison = hugetlb_mfill_pte_poison, .is_dst_valid = hugetlb_is_dst_valid, .increment = hugetlb_mfill_size, @@ -7153,7 +7158,6 @@ int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma, unsigned long increment) { struct mm_struct *dst_mm = dst_vma->vm_mm; - bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); bool wp_enabled = (flags & MFILL_ATOMIC_WP); struct hstate *h = hstate_vma(dst_vma); struct address_space *mapping = dst_vma->vm_file->f_mapping; @@ -7168,10 +7172,6 @@ int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma, bool folio_in_pagecache = false; u32 hash; - if (is_continue) - return hugetlb_mfill_pte_continue(dst_vma, dst_addr, flags, - increment); - ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment, &dst_pte, &hash, flags); if (ret) diff --git a/mm/shmem.c b/mm/shmem.c index c1816a582e48..923e9521bf6c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5208,6 +5208,7 @@ static int shmem_error_remove_folio(struct address_space *mapping, #ifdef CONFIG_USERFAULTFD static const struct vm_uffd_ops shmem_uffd_ops = { + .cont = mfill_atomic_pte_continue, .poison = mfill_atomic_pte_poison, .is_dst_valid = shmem_is_dst_valid, .increment = mfill_size, diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 0891c5dee2d4..48deecc5fc98 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -385,9 +385,9 @@ out: } /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). 
*/ -static int mfill_atomic_pte_continue( struct vm_area_struct *dst_vma, - unsigned long dst_addr, - uffd_flags_t flags) +int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma, + unsigned long dst_addr, uffd_flags_t flags, + unsigned long increment) { struct inode *inode = file_inode(dst_vma->vm_file); pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); @@ -507,6 +507,7 @@ static ssize_t uffd_def_is_dst_valid(struct vm_area_struct *dst_vma, /* Anon vma ops */ static const struct vm_uffd_ops default_uffd_ops = { + .cont = mfill_atomic_pte_continue, .poison = mfill_atomic_pte_poison, .is_dst_valid = uffd_def_is_dst_valid, .increment = mfill_size, @@ -687,12 +688,12 @@ retry: */ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { err = uffd_ops->poison(dst_vma, dst_addr, increment); + } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) { + err = uffd_ops->cont(dst_vma, dst_addr, flags, + increment); } else if (is_vm_hugetlb_page(dst_vma)) { err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr, src_addr, flags, &folio, increment); - } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) { - err = mfill_atomic_pte_continue(dst_vma, dst_addr, - flags); } else if (!(dst_vma->vm_flags & VM_SHARED)) { /* * The normal page fault path for a shmem will invoke