www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Inline mfill_atomic into continue implementation
author: Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 01:20:44 +0000 (21:20 -0400)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:59:46 +0000 (12:59 -0400)
continue can use the normal mfill_atomic() loop, but inlining means that
the uffd_ops->op setup can be removed.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
fs/userfaultfd.c
include/linux/userfaultfd_k.h
mm/userfaultfd.c

index 1b7d6dae53be5dad61b638b1e0da262f2d2a3e10..972c6f75b5504f7c2768363eb0d578fb42c54945 100644 (file)
@@ -1754,7 +1754,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
        struct uffdio_continue uffdio_continue;
        struct uffdio_continue __user *user_uffdio_continue;
        struct userfaultfd_wake_range range;
-       uffd_flags_t flags = 0;
+       bool wp = false;
 
        user_uffdio_continue = (struct uffdio_continue __user *)arg;
 
@@ -1781,11 +1781,11 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
                                     UFFDIO_CONTINUE_MODE_WP))
                goto out;
        if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
-               flags |= MFILL_ATOMIC_WP;
+               wp = true;
 
        if (mmget_not_zero(ctx->mm)) {
                ret = mfill_atomic_continue(ctx, uffdio_continue.range.start,
-                                           uffdio_continue.range.len, flags);
+                                           uffdio_continue.range.len, wp);
                mmput(ctx->mm);
        } else {
                return -ESRCH;
index c3e765dadfdb830c5e98757c6d83171b78119d65..4154fad2af55ad73a410c83d86ad476a319eeb41 100644 (file)
@@ -169,8 +169,8 @@ ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
                unsigned long src_start, unsigned long len, bool wp);
 ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
                unsigned long dst_start, unsigned long len);
-extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
-                                    unsigned long len, uffd_flags_t flags);
+ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
+               unsigned long dst_start, unsigned long len, bool wp);
 extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
                                   unsigned long len, uffd_flags_t flags);
 extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
index 1d1b70f914d30c1c9ddcb72c5e82dbc31b827c90..0b4df148881120d6603b4d9f9f3a95aec982ad94 100644 (file)
@@ -824,10 +824,13 @@ out:
        return info.copied ? info.copied : err;
 }
 
-ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
-                             unsigned long len, uffd_flags_t flags)
+ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
+               unsigned long dst_start, unsigned long len, bool wp)
 {
 
+       ssize_t err;
+       struct uffd_info info = UFFD_STRUCT_INIT(dst_start, 0, len, ctx,
+                                                wp);
        /*
         * A caller might reasonably assume that UFFDIO_CONTINUE contains an
         * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
@@ -835,9 +838,19 @@ ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
         * subsequent loads from the page through the newly mapped address range.
         */
        smp_wmb();
+       mfill_atomic_validate(&info);
+       err = mfill_setup(&info);
+       if (err)
+               goto out;
 
-       return mfill_atomic(ctx, start, 0, len,
-                           uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
+       info.op = info.uffd_ops->cont;
+       err = mfill_loop(&info);
+
+out:
+       VM_WARN_ON_ONCE(info.copied < 0);
+       VM_WARN_ON_ONCE(err > 0);
+       VM_WARN_ON_ONCE(!info.copied && !err);
+       return info.copied ? info.copied : err;
 }
 
 ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,