From: Liam R. Howlett
Date: Thu, 30 Oct 2025 01:20:44 +0000 (-0400)
Subject: mm/userfaultfd: Inline mfill_atomic into continue implementation
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=d20facf4ce16eabf35011f415ab5863255f23212;p=users%2Fjedix%2Flinux-maple.git

mm/userfaultfd: Inline mfill_atomic into continue implementation

continue can use the normal mfill_atomic() loop, but inlining means that
the uffd_ops->op setup can be removed.

Signed-off-by: Liam R. Howlett
---

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1b7d6dae53be..972c6f75b550 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1754,7 +1754,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
 	struct uffdio_continue uffdio_continue;
 	struct uffdio_continue __user *user_uffdio_continue;
 	struct userfaultfd_wake_range range;
-	uffd_flags_t flags = 0;
+	bool wp = false;
 
 	user_uffdio_continue = (struct uffdio_continue __user *)arg;
 
@@ -1781,11 +1781,11 @@
 			     UFFDIO_CONTINUE_MODE_WP))
 		goto out;
 	if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
-		flags |= MFILL_ATOMIC_WP;
+		wp = true;
 
 	if (mmget_not_zero(ctx->mm)) {
 		ret = mfill_atomic_continue(ctx, uffdio_continue.range.start,
-					    uffdio_continue.range.len, flags);
+					    uffdio_continue.range.len, wp);
 		mmput(ctx->mm);
 	} else {
 		return -ESRCH;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index c3e765dadfdb..4154fad2af55 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -169,8 +169,8 @@
 ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 			  unsigned long src_start, unsigned long len, bool wp);
 ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long len);
-extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
-				     unsigned long len, uffd_flags_t flags);
+ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
+		unsigned long dst_start, unsigned long len, bool wp);
 extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
 				   unsigned long len, uffd_flags_t flags);
 extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1d1b70f914d3..0b4df1488811 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -824,10 +824,13 @@ out:
 	return info.copied ? info.copied : err;
 }
 
-ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
-			      unsigned long len, uffd_flags_t flags)
+ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
+		unsigned long dst_start, unsigned long len, bool wp)
 {
+	ssize_t err;
+	struct uffd_info info = UFFD_STRUCT_INIT(dst_start, 0, len, ctx,
+						 wp);
 	/*
 	 * A caller might reasonably assume that UFFDIO_CONTINUE contains an
 	 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
@@ -835,9 +838,19 @@ ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
 	 * subsequent loads from the page through the newly mapped address range.
 	 */
 	smp_wmb();
+	mfill_atomic_validate(&info);
+	err = mfill_setup(&info);
+	if (err)
+		goto out;
 
-	return mfill_atomic(ctx, start, 0, len,
-			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
+	info.op = info.uffd_ops->cont;
+	err = mfill_loop(&info);
+
+out:
+	VM_WARN_ON_ONCE(info.copied < 0);
+	VM_WARN_ON_ONCE(err > 0);
+	VM_WARN_ON_ONCE(!info.copied && !err);
+	return info.copied ? info.copied : err;
 }
 
 ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,