mm/userfaultfd: Add uffd_info pointer to operation
author	Liam R. Howlett <Liam.Howlett@oracle.com>
	Wed, 29 Oct 2025 00:22:34 +0000 (20:22 -0400)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
	Thu, 30 Oct 2025 16:50:40 +0000 (12:50 -0400)
Now that all the potential operations take the same argument, a single
function pointer can be selected up front and used instead of checking
the flags on every iteration of the loop.

This also means that checking an operation for compatibility simply
becomes checking whether the pointer is set.
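
In rough outline, the change amounts to the following minimal userspace
sketch (the names here, struct info, mfill(), the mode enum, are
illustrative stand-ins, not the actual mm/userfaultfd.c code):

/*
 * Sketch of the dispatch pattern only; all names are made up.
 */
#include <stdio.h>

struct info;
typedef int (*op_fn)(struct info *info);

struct info {
	op_fn op;		/* selected once, before the loop */
	long dst_addr;
};

static int do_copy(struct info *info)
{
	printf("copy     page at %ld\n", info->dst_addr);
	return 0;
}

static int do_zeropage(struct info *info)
{
	printf("zeropage page at %ld\n", info->dst_addr);
	return 0;
}

enum mode { MODE_COPY, MODE_ZEROPAGE, MODE_CONTINUE };

static int mfill(enum mode mode, long start, long len, long increment)
{
	struct info info = { .dst_addr = start };

	/* The flags are checked once, to pick the operation... */
	if (mode == MODE_COPY)
		info.op = do_copy;
	else if (mode == MODE_ZEROPAGE)
		info.op = do_zeropage;
	/* ...and an unsupported mode simply leaves the pointer NULL. */

	if (!info.op)
		return -1;	/* -EINVAL in the kernel */

	/* The loop body no longer re-checks the flags on every page. */
	for (; info.dst_addr < start + len; info.dst_addr += increment)
		if (info.op(&info))
			return -1;
	return 0;
}

int main(void)
{
	mfill(MODE_COPY, 0, 3, 1);
	if (mfill(MODE_CONTINUE, 0, 3, 1))
		fprintf(stderr, "continue: not supported\n");
	return 0;
}

Hoisting the selection out of the loop is also what lets the MAP_PRIVATE
shmem comment move from the copy call site to the copy selection, as the
second hunk below shows.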

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
include/linux/userfaultfd_k.h
mm/userfaultfd.c

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 48de71e7e599a5ca6d02441b6efb7019da10cfcb..1dc55425f9d2760348445240252fa8e55eac7ccc 100644
@@ -102,6 +102,7 @@ struct uffd_info {
        unsigned long increment;                /* Size of each operation */
        bool wp;                                /* Operation is requesting write protection */
        const struct vm_uffd_ops *uffd_ops;     /* The vma uffd_ops pointer */
+       int (*op)(struct uffd_info *info);      /* The operation to perform on the dst */
 };
 
 #define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __wp) {        \
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1d80abd625a2c6eb8ec5b3c08071c83209edf3d0..90e07c8568aaef7df3ccc39aeb7583ebae25a4fa 100644
@@ -631,7 +631,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
        struct vm_area_struct *dst_vma;
        ssize_t err;
        long copied;
-       unsigned long increment;
        struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len,
                                                 flags & MFILL_ATOMIC_WP);
 
@@ -659,61 +658,48 @@ retry:
        dst_vma = info.dst_vma;
        info.uffd_ops = vma_get_uffd_ops(dst_vma);
        if (WARN_ON_ONCE(!info.uffd_ops->increment)) {
-               increment = PAGE_SIZE;
+               info.increment = PAGE_SIZE;
        } else {
-               increment = info.uffd_ops->increment(dst_vma);
+               info.increment = info.uffd_ops->increment(dst_vma);
        }
 
        err = uffd_ctx_lock_and_validate_dst(ctx, &info);
        if (err)
                goto out_unlock;
 
-       err = -EINVAL;
-       if (!info.uffd_ops->cont &&
-           uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
-               goto out_unlock;
-
        /*
-        * There is no default zero huge page for all huge page sizes as
-        * supported by hugetlb.  A PMD_SIZE huge pages may exist as used
-        * by THP.  Since we can not reliably insert a zero page, this
-        * feature is not supported.
+        * For shmem mappings, khugepaged is allowed to remove page
+        * tables under us; pte_offset_map_lock() will deal with that.
         */
-       if (!info.uffd_ops->zeropage &&
-           uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE))
+       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+               info.op = info.uffd_ops->poison;
+       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+               info.op = info.uffd_ops->cont;
+       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+               info.op = info.uffd_ops->copy;
+               /*
+                * The normal page fault path for a shmem will invoke
+                * the fault, fill the hole in the file and COW it right
+                * away. The result generates plain anonymous memory. So
+                * when we are asked to fill an hole in a MAP_PRIVATE
+                * shmem mapping, we'll generate anonymous memory
+                * directly without actually filling the hole. For the
+                * MAP_PRIVATE case the robustness check only happens in
+                * the pagetable (to verify it's still none) and not in
+                * the radix tree.
+                */
+       } else {
+               info.op = info.uffd_ops->zeropage;
+       }
+
+       err = -EINVAL;
+       if (!info.op)
                goto out_unlock;
 
-       info.increment = increment;
        while (info.src_addr < src_start + len) {
                VM_WARN_ON_ONCE(info.dst_addr >= dst_start + len);
-
-               /*
-                * For shmem mappings, khugepaged is allowed to remove page
-                * tables under us; pte_offset_map_lock() will deal with that.
-                */
-               if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
-                       err = info.uffd_ops->poison(&info);
-               } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
-                       err = info.uffd_ops->cont(&info);
-               } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
-                       err = info.uffd_ops->copy(&info);
-                       /*
-                        * The normal page fault path for a shmem will invoke
-                        * the fault, fill the hole in the file and COW it right
-                        * away. The result generates plain anonymous memory. So
-                        * when we are asked to fill an hole in a MAP_PRIVATE
-                        * shmem mapping, we'll generate anonymous memory
-                        * directly without actually filling the hole. For the
-                        * MAP_PRIVATE case the robustness check only happens in
-                        * the pagetable (to verify it's still none) and not in
-                        * the radix tree.
-                        */
-               } else {
-                       err = info.uffd_ops->zeropage(&info);
-               }
-
+               err = info.op(&info);
                cond_resched();
-
                if (unlikely(err == -ENOENT)) {
                        err = info.uffd_ops->failed_do_unlock(ctx, dst_vma,
                                              info.foliop, info.src_addr);
@@ -726,10 +712,9 @@ retry:
                }
 
                if (!err) {
-                       info.dst_addr += increment;
-                       info.src_addr += increment;
-                       copied += increment;
-
+                       info.dst_addr += info.increment;
+                       info.src_addr += info.increment;
+                       copied += info.increment;
                        if (fatal_signal_pending(current))
                                err = -EINTR;
                }