mm/userfaultfd: Inline mfill_atomic() in mfill_atomic_copy()
author    Liam R. Howlett <Liam.Howlett@oracle.com>
          Wed, 29 Oct 2025 19:27:45 +0000 (15:27 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
          Thu, 30 Oct 2025 16:58:50 +0000 (12:58 -0400)
mfill_atomic_copy() is special with regard to its handling of folios
and retries.  Inlining mfill_atomic() and adding a dedicated loop for
copy simplifies the code for poison, zeropage, and continue.
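
For background, copy is the only mfill operation whose per-PTE op may
return -ENOENT to ask for the source to be brought in outside the lock
before retrying.  A minimal sketch of that contract (illustrative
only; locking and folio details elided):

	for (;;) {
		err = mfill_setup(&info);	/* (re)validate dst vma */
		if (err)
			break;
		info.op = info.uffd_ops->copy;
		err = mfill_copy_loop(&info);	/* unlocks on return */
		if (err != -ENOENT)
			break;	/* done, or a hard failure */
		/* -ENOENT: source faulted in out of lock; retry setup */
	}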

Having a dedicated caller means that the uffd_flags_t argument can be
dropped in favour of a boolean for write protect.
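
With the dedicated entry point, the only mode bit a caller still has
to pass down is write protect, e.g. (mirroring the fs/userfaultfd.c
hunk below):

	bool wp = uffdio_copy.mode & UFFDIO_COPY_MODE_WP;

	ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
				uffdio_copy.len, wp);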

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
fs/userfaultfd.c
include/linux/userfaultfd_k.h
mm/userfaultfd.c

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 54c6cc7fe9c629f51d2f0da90e46404eda3d1138..1b7d6dae53be5dad61b638b1e0da262f2d2a3e10 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1589,7 +1589,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
        struct uffdio_copy uffdio_copy;
        struct uffdio_copy __user *user_uffdio_copy;
        struct userfaultfd_wake_range range;
-       uffd_flags_t flags = 0;
+       bool wp = false;
 
        user_uffdio_copy = (struct uffdio_copy __user *) arg;
 
@@ -1618,10 +1618,10 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
        if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
                goto out;
        if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
-               flags |= MFILL_ATOMIC_WP;
+               wp = true;
        if (mmget_not_zero(ctx->mm)) {
                ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
-                                       uffdio_copy.len, flags);
+                                       uffdio_copy.len, wp);
                mmput(ctx->mm);
        } else {
                return -ESRCH;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e9965e1808bc4c6f8699c6f5f7734359bac60d16..34539a142d388e694a3329bf23d6895b3c89eb64 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -165,9 +165,8 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma,
                unsigned long dst_addr, struct page *page,
                bool newly_allocated, bool wp);
 
-extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
-                                unsigned long src_start, unsigned long len,
-                                uffd_flags_t flags);
+ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+               unsigned long src_start, unsigned long len, bool wp);
 extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
                                     unsigned long dst_start,
                                     unsigned long len);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0e9ffe87f4dd07dbc892a0aedab5d2042ed9ee9b..e9b6e3e1882aee9e216c189d956312f0daf584e7 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -694,16 +694,6 @@ static __always_inline ssize_t mfill_loop(struct uffd_info *info)
                VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
                err = info->op(info);
                cond_resched();
-               if (unlikely(err == -ENOENT)) {
-                       err = info->uffd_ops->failed_do_unlock(info);
-                       if (unlikely(err))
-                               return err; /* Unlocked already */
-
-                       return -ENOENT;
-               } else {
-                       VM_WARN_ON_ONCE(info->foliop);
-               }
-
                if (!err) {
                        uffd_info_inc(info);
                        if (fatal_signal_pending(current))
@@ -726,8 +716,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
                                                 flags & MFILL_ATOMIC_WP);
 
        mfill_atomic_validate(&info);
-retry:
-
        err = mfill_setup(&info);
        if (err)
                goto out;
@@ -739,24 +727,74 @@ retry:
        }
 
        err = mfill_loop(&info);
-       if (unlikely(err == -ENOENT))
-               goto retry;
 
 out:
-       if (info.foliop)
-               folio_put(info.foliop);
        VM_WARN_ON_ONCE(info.copied < 0);
        VM_WARN_ON_ONCE(err > 0);
        VM_WARN_ON_ONCE(!info.copied && !err);
        return info.copied ? info.copied : err;
 }
 
+static __always_inline ssize_t mfill_copy_loop(struct uffd_info *info)
+{
+       ssize_t err = -EINVAL;
+
+       if (!info->op)
+               goto out_unlock;
+
+       err = 0;
+       while (!err && (info->src_addr < info->src_last)) {
+               VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
+               err = info->op(info);
+               cond_resched();
+               if (unlikely(err == -ENOENT)) {
+                       err = info->uffd_ops->failed_do_unlock(info);
+                       if (unlikely(err))
+                               return err; /* Unlocked already */
+
+                       return -ENOENT;
+               } else {
+                       VM_WARN_ON_ONCE(info->foliop);
+               }
+
+               if (!err) {
+                       uffd_info_inc(info);
+                       if (fatal_signal_pending(current))
+                               err = -EINTR;
+               }
+       }
+
+out_unlock:
+       uffd_ctx_unlock(info);
+       return err;
+}
+
 ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
-                         unsigned long src_start, unsigned long len,
-                         uffd_flags_t flags)
+               unsigned long src_start, unsigned long len, bool wp)
 {
-       return mfill_atomic(ctx, dst_start, src_start, len,
-                           uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
+       ssize_t err;
+       struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
+                                                wp);
+
+       mfill_atomic_validate(&info);
+retry:
+
+       err = mfill_setup(&info);
+       if (err)
+               goto out;
+
+       info.op = info.uffd_ops->copy;
+       err = mfill_copy_loop(&info);
+       if (unlikely(err == -ENOENT))
+               goto retry;
+
+out:
+       if (info.foliop)
+               folio_put(info.foliop);
+       VM_WARN_ON_ONCE(info.copied < 0);
+       VM_WARN_ON_ONCE(err > 0);
+       VM_WARN_ON_ONCE(!info.copied && !err);
+       return info.copied ? info.copied : err;
 }
 
 ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,