www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Move uffd_info setup and ops setting to their own functions
author Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 29 Oct 2025 01:08:52 +0000 (21:08 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:52:08 +0000 (12:52 -0400)
Clean up mfill_atomic() by moving setup and ops external.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/userfaultfd.c

index 6ee08b44694d2e3d54e6ebb2a5be7893d3cbbcbf..6324c114dfd9dd9411b2bd9b2f981bb50e6ed63f 100644 (file)
@@ -619,6 +619,57 @@ uffd_get_dst_pmd(struct vm_area_struct *dst_vma, unsigned long dst_addr,
        return 0;
 }
 
+static inline ssize_t uffd_info_set_ops(struct uffd_info *info,
+               uffd_flags_t flags)
+{
+       /*
+        * For shmem mappings, khugepaged is allowed to remove page
+        * tables under us; pte_offset_map_lock() will deal with that.
+        */
+       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+               info->op = info->uffd_ops->poison;
+       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+               info->op = info->uffd_ops->cont;
+       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+               info->op = info->uffd_ops->copy;
+               /*
+                * The normal page fault path for a shmem will invoke
+                * the fault, fill the hole in the file and COW it right
+                * away. The result generates plain anonymous memory. So
+                * when we are asked to fill an hole in a MAP_PRIVATE
+                * shmem mapping, we'll generate anonymous memory
+                * directly without actually filling the hole. For the
+                * MAP_PRIVATE case the robustness check only happens in
+                * the pagetable (to verify it's still none) and not in
+                * the radix tree.
+                */
+       } else {
+               info->op = info->uffd_ops->zeropage;
+       }
+
+       if (!info->op)
+               return -EINVAL;
+
+       return 0;
+}
+
+static inline ssize_t uffd_info_setup(struct uffd_info *info)
+{
+
+       ssize_t err;
+
+       info->uffd_ops = vma_get_uffd_ops(info->dst_vma);
+       WARN_ON_ONCE(!info->uffd_ops->increment);
+       WARN_ON_ONCE(!info->uffd_ops->is_dst_valid);
+
+       info->increment = info->uffd_ops->increment(info->dst_vma);
+       err = info->uffd_ops->is_dst_valid(info->dst_vma, info->dst_addr,
+                                           info->len);
+       if (err)
+               return err;
+
+       return 0;
+}
 
 static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
                                            unsigned long dst_start,
@@ -657,42 +708,12 @@ retry:
        if (err)
                goto out_unlock;
 
-       info.uffd_ops = vma_get_uffd_ops(info.dst_vma);
-       WARN_ON_ONCE(!info.uffd_ops->increment);
-       WARN_ON_ONCE(!info.uffd_ops->is_dst_valid);
-
-       info.increment = info.uffd_ops->increment(info.dst_vma);
-       err = info.uffd_ops->is_dst_valid(info.dst_vma, info.dst_addr,
-                                           info.len);
+       err = uffd_info_setup(&info);
        if (err)
                goto out_unlock;
-       /*
-        * For shmem mappings, khugepaged is allowed to remove page
-        * tables under us; pte_offset_map_lock() will deal with that.
-        */
-       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
-               info.op = info.uffd_ops->poison;
-       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
-               info.op = info.uffd_ops->cont;
-       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
-               info.op = info.uffd_ops->copy;
-               /*
-                * The normal page fault path for a shmem will invoke
-                * the fault, fill the hole in the file and COW it right
-                * away. The result generates plain anonymous memory. So
-                * when we are asked to fill an hole in a MAP_PRIVATE
-                * shmem mapping, we'll generate anonymous memory
-                * directly without actually filling the hole. For the
-                * MAP_PRIVATE case the robustness check only happens in
-                * the pagetable (to verify it's still none) and not in
-                * the radix tree.
-                */
-       } else {
-               info.op = info.uffd_ops->zeropage;
-       }
 
-       err = -EINVAL;
-       if (!info.op)
+       err = uffd_info_set_ops(&info, flags);
+       if (err)
                goto out_unlock;
 
        while (info.src_addr < src_start + len) {