From d674df9ec2c13af14e5cae96afa00603bad46ea8 Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Wed, 29 Oct 2025 15:27:45 -0400
Subject: [PATCH] mm/userfaultfd: Inline mfill_atomic() in mfill_atomic_copy()

mfill_atomic_copy() is special with regard to its handling of folios and
retries.  Inlining mfill_atomic() and adding a dedicated copy loop
simplifies the code for poison, zeropage and continue.  Having a specific
caller means that the uffd_flags_t can be dropped in favour of a boolean
for write protect.

Signed-off-by: Liam R. Howlett
---
 fs/userfaultfd.c              |  6 +--
 include/linux/userfaultfd_k.h |  5 +--
 mm/userfaultfd.c              | 78 ++++++++++++++++++++++++++---------
 3 files changed, 63 insertions(+), 26 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 54c6cc7fe9c6..1b7d6dae53be 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1589,7 +1589,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
 	struct uffdio_copy uffdio_copy;
 	struct uffdio_copy __user *user_uffdio_copy;
 	struct userfaultfd_wake_range range;
-	uffd_flags_t flags = 0;
+	bool wp = false;
 
 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
 
@@ -1618,10 +1618,10 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
 	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
 		goto out;
 	if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
-		flags |= MFILL_ATOMIC_WP;
+		wp = true;
 	if (mmget_not_zero(ctx->mm)) {
 		ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
-					uffdio_copy.len, flags);
+					uffdio_copy.len, wp);
 		mmput(ctx->mm);
 	} else {
 		return -ESRCH;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e9965e1808bc..34539a142d38 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -165,9 +165,8 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma,
 			     unsigned long dst_addr, struct page *page,
 			     bool newly_allocated, bool wp);
 
-extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
-				 unsigned long src_start, unsigned long len,
-				 uffd_flags_t flags);
+ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+			  unsigned long src_start, unsigned long len, bool wp);
 extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
 				     unsigned long dst_start,
 				     unsigned long len);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0e9ffe87f4dd..e9b6e3e1882a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -694,16 +694,6 @@ static __always_inline ssize_t mfill_loop(struct uffd_info *info)
 		VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
 		err = info->op(info);
 		cond_resched();
-		if (unlikely(err == -ENOENT)) {
-			err = info->uffd_ops->failed_do_unlock(info);
-			if (unlikely(err))
-				return err; /* Unlocked already */
-
-			return -ENOENT;
-		} else {
-			VM_WARN_ON_ONCE(info->foliop);
-		}
-
 		if (!err) {
 			uffd_info_inc(info);
 			if (fatal_signal_pending(current))
@@ -726,8 +716,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 			flags & MFILL_ATOMIC_WP);
 
 	mfill_atomic_validate(&info);
-retry:
-
 	err = mfill_setup(&info);
 	if (err)
 		goto out;
@@ -739,24 +727,74 @@ retry:
 	}
 
 	err = mfill_loop(&info);
-	if (unlikely(err == -ENOENT))
-		goto retry;
 
 out:
-	if (info.foliop)
-		folio_put(info.foliop);
 	VM_WARN_ON_ONCE(info.copied < 0);
 	VM_WARN_ON_ONCE(err > 0);
 	VM_WARN_ON_ONCE(!info.copied && !err);
 	return info.copied ? info.copied : err;
 }
 
+static __always_inline ssize_t mfill_copy_loop(struct uffd_info *info)
+{
+	ssize_t err = -EINVAL;
+
+	if (!info->op)
+		goto out_unlock;
+
+	err = 0;
+	while (!err && (info->src_addr < info->src_last)) {
+		VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
+		err = info->op(info);
+		cond_resched();
+		if (unlikely(err == -ENOENT)) {
+			err = info->uffd_ops->failed_do_unlock(info);
+			if (unlikely(err))
+				return err; /* Unlocked already */
+
+			return -ENOENT;
+		} else {
+			VM_WARN_ON_ONCE(info->foliop);
+		}
+
+		if (!err) {
+			uffd_info_inc(info);
+			if (fatal_signal_pending(current))
+				err = -EINTR;
+		}
+	}
+
+out_unlock:
+	uffd_ctx_unlock(info);
+	return err;
+}
+
 ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
-			  unsigned long src_start, unsigned long len,
-			  uffd_flags_t flags)
+			  unsigned long src_start, unsigned long len, bool wp)
 {
-	return mfill_atomic(ctx, dst_start, src_start, len,
-			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
+	ssize_t err;
+	struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
+						 wp);
+
+	mfill_atomic_validate(&info);
+retry:
+
+	err = mfill_setup(&info);
+	if (err)
+		goto out;
+
+	info.op = info.uffd_ops->copy;
+	err = mfill_copy_loop(&info);
+	if (unlikely(err == -ENOENT))
+		goto retry;
+
+out:
+	if (info.foliop)
+		folio_put(info.foliop);
+	VM_WARN_ON_ONCE(info.copied < 0);
+	VM_WARN_ON_ONCE(err > 0);
+	VM_WARN_ON_ONCE(!info.copied && !err);
+	return info.copied ? info.copied : err;
 }
 
 ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
-- 
2.51.0
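
Editor's note, as a usage illustration only (not part of the patch): the hunk in
userfaultfd_copy() above turns UFFDIO_COPY_MODE_WP into the new "bool wp" that
mfill_atomic_copy() now takes. A minimal userspace sketch of that ioctl path is
below; the uffd descriptor, its registration over the destination range, and the
dst/src/page_size values are assumed to be set up by the caller.

	/*
	 * Illustrative sketch -- assumes uffd is a userfaultfd descriptor
	 * already registered over the destination range and src points at a
	 * populated, page_size-sized buffer.
	 */
	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	static long uffd_copy_wp(int uffd, void *dst, void *src,
				 unsigned long page_size)
	{
		struct uffdio_copy copy = {
			.dst  = (unsigned long)dst,
			.src  = (unsigned long)src,
			.len  = page_size,
			/* Requests write protect: userfaultfd_copy() sets wp = true */
			.mode = UFFDIO_COPY_MODE_WP,
			.copy = 0,
		};

		if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
			/* copy.copy holds bytes copied so far, or a negative errno */
			perror("UFFDIO_COPY");
			return -1;
		}
		return copy.copy;	/* number of bytes resolved by the copy */
	}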