www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Break up mfill_atomic() around retry and looping
author: Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 29 Oct 2025 19:17:35 +0000 (15:17 -0400)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:58:43 +0000 (12:58 -0400)
mfill_atomic_copy is special in that it has an unusual -ENOENT error path that
triggers a retry, while mfill_atomic_continue masks -ENOENT.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
include/linux/userfaultfd_k.h
mm/userfaultfd.c

index fc4b15b6fc29e74382f947a47a91950b615715fe..e9965e1808bc4c6f8699c6f5f7734359bac60d16 100644 (file)
@@ -96,6 +96,8 @@ struct uffd_info {
        unsigned long dst_addr;                 /* Target address */
        unsigned long src_addr;                 /* Source address */
        unsigned long len;                      /* Total length to copy */
+       unsigned long src_last;                 /* starting src_addr + len */
+       unsigned long dst_last;                 /* starting dst_addr + len */
        struct folio *foliop;                   /* folio pointer for retry */
        struct userfaultfd_ctx *ctx;            /* The userfaultfd context */
 
@@ -104,15 +106,19 @@ struct uffd_info {
        bool wp;                                /* Operation is requesting write protection */
        const struct vm_uffd_ops *uffd_ops;     /* The vma uffd_ops pointer */
        int (*op)(struct uffd_info *info);      /* The operation to perform on the dst */
+       long copied;
 };
 
 #define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __ctx, __wp) { \
        .dst_addr = (__dst_addr),                                       \
        .src_addr = (__src_addr),                                       \
        .len = (__len),                                                 \
+       .src_last = (__src_addr) + (__len),                             \
+       .dst_last = (__dst_addr) + (__len),                             \
        .wp = (__wp),                                                   \
        .ctx = (__ctx),                                                 \
        .foliop = NULL,                                                 \
+       .copied = 0,                                                    \
 }
 
 void uffd_complete_register(struct vm_area_struct *vma);
index 0e7a1b05caee81b9c26158245b34370a9dada39a..0e9ffe87f4dd07dbc892a0aedab5d2042ed9ee9b 100644 (file)
@@ -557,11 +557,11 @@ static inline void uffd_ctx_unlock(struct uffd_info *info)
        uffd_mfill_unlock(info->dst_vma);
 }
 
-static inline unsigned long uffd_info_inc(struct uffd_info *info)
+static inline void uffd_info_inc(struct uffd_info *info)
 {
        info->dst_addr += info->increment;
        info->src_addr += info->increment;
-       return info->increment;
+       info->copied += info->increment;
 }
 
 ssize_t
@@ -646,83 +646,109 @@ static inline ssize_t uffd_info_setup(struct uffd_info *info)
        return 0;
 }
 
-static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
-                                           unsigned long dst_start,
-                                           unsigned long src_start,
-                                           unsigned long len,
-                                           uffd_flags_t flags)
+static __always_inline void mfill_atomic_validate(struct uffd_info *info)
 {
-       ssize_t err;
-       long copied;
-       struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
-                                                flags & MFILL_ATOMIC_WP);
-
        /*
         * Sanitize the command parameters:
         */
-       VM_WARN_ON_ONCE(dst_start & ~PAGE_MASK);
-       VM_WARN_ON_ONCE(len & ~PAGE_MASK);
+       VM_WARN_ON_ONCE(info->dst_addr & ~PAGE_MASK);
+       VM_WARN_ON_ONCE(info->len & ~PAGE_MASK);
 
        /* Does the address range wrap, or is the span zero-sized? */
-       VM_WARN_ON_ONCE(src_start + len <= src_start);
-       VM_WARN_ON_ONCE(dst_start + len <= dst_start);
-
-       copied = 0;
+       VM_WARN_ON_ONCE(info->src_last <= info->src_addr);
+       VM_WARN_ON_ONCE(info->dst_last <= info->dst_addr);
+}
 
-retry:
+static __always_inline ssize_t mfill_setup(struct uffd_info *info)
+{
+       ssize_t err;
        /*
         * Make sure the vma is not shared, that the dst range is
         * both valid and fully within a single existing vma.
         */
-       err = uffd_mfill_lock(&info);
+       err = uffd_mfill_lock(info);
        if (err)
-               goto out;
+               return err;
 
 
-       err = uffd_ctx_lock_and_validate_dst(&info);
+       err = uffd_ctx_lock_and_validate_dst(info);
        if (err)
                goto out_unlock;
 
-       err = uffd_info_setup(&info);
+       err = uffd_info_setup(info);
        if (err)
                goto out_unlock;
 
-       err = uffd_info_set_ops(&info, flags);
-       if (err)
-               goto out_unlock;
+       return 0;
+
+out_unlock:
+       uffd_ctx_unlock(info);
+       return err;
+}
+
+static __always_inline ssize_t mfill_loop(struct uffd_info *info)
+{
+       ssize_t err = 0;
 
-       while (info.src_addr < src_start + len) {
-               VM_WARN_ON_ONCE(info.dst_addr >= dst_start + len);
-               err = info.op(&info);
+       while (!err && (info->src_addr < info->src_last)) {
+               VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
+               err = info->op(info);
                cond_resched();
                if (unlikely(err == -ENOENT)) {
-                       err = info.uffd_ops->failed_do_unlock(&info);
+                       err = info->uffd_ops->failed_do_unlock(info);
                        if (unlikely(err))
-                               goto out;
+                               return err; /* Unlocked already */
 
-                       goto retry;
+                       return -ENOENT;
                } else {
-                       VM_WARN_ON_ONCE(info.foliop);
+                       VM_WARN_ON_ONCE(info->foliop);
                }
 
                if (!err) {
-                       copied += uffd_info_inc(&info);
+                       uffd_info_inc(info);
                        if (fatal_signal_pending(current))
                                err = -EINTR;
                }
-               if (err)
-                       break;
        }
 
-out_unlock:
-       uffd_ctx_unlock(&info);
+       uffd_ctx_unlock(info);
+       return err;
+}
+
+static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
+                                           unsigned long dst_start,
+                                           unsigned long src_start,
+                                           unsigned long len,
+                                           uffd_flags_t flags)
+{
+       ssize_t err;
+       struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
+                                                flags & MFILL_ATOMIC_WP);
+
+       mfill_atomic_validate(&info);
+retry:
+
+       err = mfill_setup(&info);
+       if (err)
+               goto out;
+
+       err = uffd_info_set_ops(&info, flags);
+       if (err) {
+               uffd_ctx_unlock(&info);
+               goto out;
+       }
+
+       err = mfill_loop(&info);
+       if (unlikely(err == -ENOENT))
+               goto retry;
+
 out:
        if (info.foliop)
                folio_put(info.foliop);
-       VM_WARN_ON_ONCE(copied < 0);
+       VM_WARN_ON_ONCE(info.copied < 0);
        VM_WARN_ON_ONCE(err > 0);
-       VM_WARN_ON_ONCE(!copied && !err);
-       return copied ? copied : err;
+       VM_WARN_ON_ONCE(!info.copied && !err);
+       return info.copied ? info.copied : err;
 }
 
 ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,