mm/userfaultfd: Change pte_copy() prototypes to use new uffd_info struct
Author:     Liam R. Howlett <Liam.Howlett@oracle.com>
AuthorDate: Tue, 28 Oct 2025 19:17:55 +0000 (15:17 -0400)
Commit:     Liam R. Howlett <Liam.Howlett@oracle.com>
CommitDate: Thu, 30 Oct 2025 16:49:53 +0000 (12:49 -0400)
It is cleaner to pass a single uffd_info struct than to thread dst_addr,
src_addr, wp, foliop, and increment through each pte_copy() implementation
individually.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
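
For orientation before the hunks: below is a minimal, self-contained sketch
of the calling convention this patch moves to. It is a userspace mock, not
kernel code; mock_copy, mock_ops, and the example addresses are illustrative
assumptions. Per-operation state travels in one uffd_info and ->copy()
receives only a pointer to it; the real definitions are in the
include/linux/userfaultfd_k.h hunk below.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct folio;			/* opaque in this mock */
struct vm_area_struct;		/* opaque in this mock */
struct vm_uffd_ops;

struct uffd_info {
	unsigned long dst_addr;			/* Target address */
	unsigned long src_addr;			/* Source address */
	unsigned long len;			/* Total length to copy */
	struct folio *foliop;			/* folio pointer for retry */
	struct vm_area_struct *dst_vma;		/* Target vma */
	unsigned long increment;		/* Size of each operation */
	bool wp;				/* Write protection requested */
	const struct vm_uffd_ops *uffd_ops;	/* The vma uffd_ops pointer */
};

#define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __wp) {	\
	.dst_addr = (__dst_addr),					\
	.src_addr = (__src_addr),					\
	.len = (__len),							\
	.wp = (__wp),							\
	.foliop = NULL,							\
}

struct vm_uffd_ops {
	int (*copy)(struct uffd_info *info);	/* was six parameters */
};

/* Hypothetical stand-in for mfill_atomic_pte_copy() and friends. */
static int mock_copy(struct uffd_info *info)
{
	printf("copy %lu bytes dst=%#lx src=%#lx wp=%d\n",
	       info->increment, info->dst_addr, info->src_addr, info->wp);
	return 0;
}

static const struct vm_uffd_ops mock_ops = { .copy = mock_copy };

int main(void)
{
	struct uffd_info info = UFFD_STRUCT_INIT(0x7000UL, 0x8000UL,
						 4096UL, false);

	info.uffd_ops = &mock_ops;
	info.increment = 4096;
	return info.uffd_ops->copy(&info);
}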
include/linux/userfaultfd_k.h
mm/hugetlb.c
mm/shmem.c
mm/userfaultfd.c

index ba61ff6eba00e11133d93629d02a88ece58fbef0..e3aafc30a1a7bbdaa286076d4f9d18f1fa88904b 100644 (file)
@@ -92,15 +92,33 @@ enum mfill_atomic_mode {
        NR_MFILL_ATOMIC_MODES,
 };
 
+struct uffd_info {
+       unsigned long dst_addr;                 /* Target address */
+       unsigned long src_addr;                 /* Source address */
+       unsigned long len;                      /* Total length to copy */
+       struct folio *foliop;                   /* folio pointer for retry */
+
+       struct vm_area_struct *dst_vma;         /* Target vma */
+       unsigned long increment;                /* Size of each operation */
+       bool wp;                                /* Operation is requesting write protection */
+       const struct vm_uffd_ops *uffd_ops;     /* The vma uffd_ops pointer */
+};
+
+#define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __wp) {        \
+       .dst_addr = (__dst_addr),                                       \
+       .src_addr = (__src_addr),                                       \
+       .len = (__len),                                                 \
+       .wp = (__wp),                                                   \
+       .foliop = NULL,                                                 \
+}
+
 /* VMA userfaultfd operations */
 ssize_t uffd_failed_do_unlock(struct userfaultfd_ctx *ctx,
                struct vm_area_struct *dst, struct folio *folio,
                unsigned long src_addr);
 
 struct vm_uffd_ops {
-       int (*copy)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
-                   unsigned long src_addr, bool wp,
-                   struct folio **foliop, unsigned long increment);
+       int (*copy)(struct uffd_info *info);
        int (*zeropage)(struct vm_area_struct *dst_vma, unsigned long dst_addr);
        int (*cont)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
                    bool wp, unsigned long increment);
@@ -116,25 +134,6 @@ struct vm_uffd_ops {
                        unsigned long src_addr);
 };
 
-struct uffd_info {
-       unsigned long dst_addr;                 /* Target address */
-       unsigned long src_addr;                 /* Source address */
-       unsigned long len;                      /* Total length to copy */
-
-       struct vm_area_struct *dst_vma;         /* Target vma */
-       unsigned long increment;                /* Size of each operation */
-       bool wp;                                /* Operation is requesting write protection */
-       struct folio **foliop;                  /* folio pointer for retry */
-       const struct vm_uffd_ops *uffd_ops;     /* The vma uffd_ops pointer */
-};
-
-#define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __wp) {        \
-       .dst_addr = (__dst_addr),                                       \
-       .src_addr = (__src_addr),                                       \
-       .len = (__len),                                                 \
-       .wp = (__wp),                                                   \
-}
-
 #define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
 #define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
 #define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
@@ -263,11 +262,7 @@ int mfill_atomic_pte_continue(struct vm_area_struct *dst_vma,
                unsigned long dst_addr, bool wp,
                unsigned long increment);
 
-int mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
-               unsigned long dst_addr, unsigned long src_addr,
-               bool wp, struct folio **foliop,
-               unsigned long increment);
-
+int mfill_atomic_pte_copy(struct uffd_info *info);
 int mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
                unsigned long dst_addr);
 
index 4bc4d4a9f5fd6d6acafa5ad6ac458c455d4bff2a..8abdb4bb89d4853167a35f6f66b41c1f07a3b43d 100644 (file)
@@ -5536,10 +5536,7 @@ static int hugetlb_mfill_pte_continue(struct vm_area_struct *dst_vma,
                unsigned long dst_addr, bool wp_enabled,
                unsigned long increment);
 
-static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
-               unsigned long dst_addr, unsigned long src_addr,
-               bool wp_enabled, struct folio **foliop,
-               unsigned long increment);
+static int hugetlb_mfill_atomic_pte_copy(struct uffd_info *info);
 
 static const struct vm_uffd_ops hugetlb_uffd_ops = {
        .copy                   =       hugetlb_mfill_atomic_pte_copy,
@@ -7157,14 +7154,13 @@ out_release_unlock:
  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
  * with modifications for hugetlb pages.
  */
-static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
-               unsigned long dst_addr, unsigned long src_addr,
-               bool wp_enabled, struct folio **foliop,
-               unsigned long increment)
+static int hugetlb_mfill_atomic_pte_copy(struct uffd_info *info)
 {
+       struct vm_area_struct *dst_vma = info->dst_vma;
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct hstate *h = hstate_vma(dst_vma);
        struct address_space *mapping = dst_vma->vm_file->f_mapping;
+       unsigned long dst_addr = info->dst_addr;
        pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
        unsigned long size = huge_page_size(h);
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
@@ -7176,12 +7172,12 @@ static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
        bool folio_in_pagecache = false;
        u32 hash;
 
-       ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
-                                   &dst_pte, &hash, wp_enabled);
+       ret = hugetlb_mfill_prepare(dst_vma, dst_addr, info->increment,
+                                   &dst_pte, &hash, info->wp);
        if (ret)
                return ret;
 
-       if (!*foliop) {
+       if (!info->foliop) {
                /* If a folio already exists, then it's UFFDIO_COPY for
                 * a non-missing case. Return -EEXIST.
                 */
@@ -7202,7 +7198,8 @@ static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
                        goto out;
                }
 
-               ret = copy_folio_from_user(folio, (const void __user *) src_addr,
+               ret = copy_folio_from_user(folio,
+                                          (const void __user *) info->src_addr,
                                           false);
 
                /* fallback to copy_from_user outside mmap_lock */
@@ -7222,7 +7219,7 @@ static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
                                ret = -ENOMEM;
                                goto out;
                        }
-                       *foliop = folio;
+                       info->foliop = folio;
                        /* Set the outparam foliop and return to the caller to
                         * copy the contents outside the lock. Don't free the
                         * folio.
@@ -7232,22 +7229,22 @@ static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
        } else {
                if (vm_shared &&
                    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
-                       folio_put(*foliop);
+                       folio_put(info->foliop);
                        ret = -EEXIST;
-                       *foliop = NULL;
+                       info->foliop = NULL;
                        goto out;
                }
 
                folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
                if (IS_ERR(folio)) {
-                       folio_put(*foliop);
+                       folio_put(info->foliop);
                        ret = -ENOMEM;
-                       *foliop = NULL;
+                       info->foliop = NULL;
                        goto out;
                }
-               ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
-               folio_put(*foliop);
-               *foliop = NULL;
+               ret = copy_user_large_folio(folio, info->foliop, dst_addr, dst_vma);
+               folio_put(info->foliop);
+               info->foliop = NULL;
                if (ret) {
                        folio_put(folio);
                        goto out;
@@ -7309,7 +7306,7 @@ static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
         * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
         * with wp flag set, don't set pte write bit.
         */
-       _dst_pte = make_huge_pte(dst_vma, folio, !wp_enabled);
+       _dst_pte = make_huge_pte(dst_vma, folio, !info->wp);
        /*
         * Always mark UFFDIO_COPY page dirty; note that this may not be
         * extremely important for hugetlbfs for now since swapping is not
@@ -7319,7 +7316,7 @@ static int hugetlb_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
        _dst_pte = huge_pte_mkdirty(_dst_pte);
        _dst_pte = pte_mkyoung(_dst_pte);
 
-       if (wp_enabled)
+       if (info->wp)
                _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
 
        set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
index c049d2c2594625e236e7fbde33b1fe9a66d4c066..eb93f6a1ef00fb674584d5d018a28c3811d02ed3 100644 (file)
@@ -3234,21 +3234,19 @@ static int shmem_mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
        return ret;
 }
 
-static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
-               unsigned long dst_addr, unsigned long src_addr,
-               bool wp, struct folio **foliop,
-               unsigned long increment)
+static int shmem_mfill_atomic_pte_copy(struct uffd_info *u_info)
 {
+       struct vm_area_struct *dst_vma = u_info->dst_vma;
        struct inode *inode = file_inode(dst_vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        gfp_t gfp = mapping_gfp_mask(mapping);
-       pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+       pgoff_t pgoff = linear_page_index(dst_vma, u_info->dst_addr);
        struct folio *folio;
        int ret;
        pmd_t *dst_pmd;
 
-       ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
+       ret = uffd_get_dst_pmd(dst_vma, u_info->dst_addr, &dst_pmd);
        if (ret)
                return ret;
 
@@ -3258,33 +3256,33 @@ static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
                 * and now we find ourselves with -ENOMEM. Release the page, to
                 * avoid a BUG_ON in our caller.
                 */
-               if (unlikely(*foliop)) {
-                       folio_put(*foliop);
-                       *foliop = NULL;
+               if (unlikely(u_info->foliop)) {
+                       folio_put(u_info->foliop);
+                       u_info->foliop = NULL;
                }
                return -ENOMEM;
        }
 
-       if (!*foliop) {
+       if (!u_info->foliop) {
                ret = -ENOMEM;
                folio = shmem_alloc_folio(gfp, 0, info, pgoff);
                if (!folio)
                        goto out_unacct_blocks;
 
-               ret = uffd_atomic_pte_copy(folio, src_addr);
+               ret = uffd_atomic_pte_copy(folio, u_info->src_addr);
                if (ret) {
-                       *foliop = folio;
+                       u_info->foliop = folio;
                        goto out_unacct_blocks;
                }
 
        } else {
-               folio = *foliop;
+               folio = u_info->foliop;
                VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-               *foliop = NULL;
+               u_info->foliop = NULL;
        }
 
-       ret = shmem_mfill_complete(dst_vma, dst_addr, dst_pmd, pgoff, folio,
-                                  inode, gfp);
+       ret = shmem_mfill_complete(dst_vma, u_info->dst_addr, dst_pmd, pgoff,
+                                  folio, inode, gfp);
        return ret;
 
 out_unacct_blocks:
index 9c45223e7d9262cdb82f4bbfec5ba80e7469c49a..5f0d672c38eec0633b78bc346fe2696185849263 100644 (file)
@@ -272,35 +272,33 @@ inline int uffd_atomic_pte_copy(struct folio *folio, unsigned long src_addr)
        return 0;
 }
 
-int mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
-               unsigned long dst_addr, unsigned long src_addr,
-               bool wp, struct folio **foliop,
-               unsigned long increment)
+int mfill_atomic_pte_copy(struct uffd_info *info)
 {
        int ret;
        struct folio *folio;
        pmd_t *dst_pmd;
+       struct vm_area_struct *dst_vma = info->dst_vma;
 
-       ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
+       ret = uffd_get_dst_pmd(dst_vma, info->dst_addr, &dst_pmd);
        if (ret)
                return ret;
 
-       if (!*foliop) {
+       if (!info->foliop) {
                ret = -ENOMEM;
                folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
-                                       dst_addr);
+                                       info->dst_addr);
                if (!folio)
                        goto out;
 
-               ret = uffd_atomic_pte_copy(folio, src_addr);
+               ret = uffd_atomic_pte_copy(folio, info->src_addr);
                if (ret) {
-                       *foliop = folio;
+                       info->foliop = folio;
                        goto out;
                }
 
        } else {
-               folio = *foliop;
-               *foliop = NULL;
+               folio = info->foliop;
+               info->foliop = NULL;
        }
 
        /*
@@ -314,8 +312,8 @@ int mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
        if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
                goto out_release;
 
-       ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                      &folio->page, true, wp);
+       ret = mfill_atomic_install_pte(dst_pmd, dst_vma, info->dst_addr,
+                                      &folio->page, true, info->wp);
        if (ret)
                goto out_release;
 out:
@@ -587,7 +585,6 @@ uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx,
                return -EINVAL;
 
        WARN_ON_ONCE(!info->uffd_ops->is_dst_valid);
-
        return info->uffd_ops->is_dst_valid(info->dst_vma, info->dst_addr,
                                            info->len);
 }
@@ -632,9 +629,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
        struct mm_struct *dst_mm = ctx->mm;
        struct vm_area_struct *dst_vma;
        ssize_t err;
-       unsigned long src_addr, dst_addr;
        long copied;
-       struct folio *folio;
        unsigned long increment;
        struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len,
                                                 flags & MFILL_ATOMIC_WP);
@@ -649,10 +644,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
        VM_WARN_ON_ONCE(src_start + len <= src_start);
        VM_WARN_ON_ONCE(dst_start + len <= dst_start);
 
-       src_addr = src_start;
-       dst_addr = dst_start;
        copied = 0;
-       folio = NULL;
 
 retry:
        /*
@@ -691,24 +683,22 @@ retry:
                goto out_unlock;
 
        info.increment = increment;
-       while (src_addr < src_start + len) {
-               VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
+       while (info.src_addr < src_start + len) {
+               VM_WARN_ON_ONCE(info.dst_addr >= dst_start + len);
 
                /*
                 * For shmem mappings, khugepaged is allowed to remove page
                 * tables under us; pte_offset_map_lock() will deal with that.
                 */
                if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
-                       err = info.uffd_ops->poison(dst_vma, dst_addr,
+                       err = info.uffd_ops->poison(dst_vma, info.dst_addr,
                                                    increment);
                } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
-                       err = info.uffd_ops->cont(dst_vma, dst_addr,
+                       err = info.uffd_ops->cont(dst_vma, info.dst_addr,
                                             flags & MFILL_ATOMIC_WP,
                                             increment);
                } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
-                       err = info.uffd_ops->copy(dst_vma, dst_addr, src_addr,
-                                            flags & MFILL_ATOMIC_WP,
-                                            &folio, increment);
+                       err = info.uffd_ops->copy(&info);
                        /*
                         * The normal page fault path for a shmem will invoke
                         * the fault, fill the hole in the file and COW it right
@@ -721,25 +711,25 @@ retry:
                         * the radix tree.
                         */
                } else {
-                       err = info.uffd_ops->zeropage(dst_vma, dst_addr);
+                       err = info.uffd_ops->zeropage(dst_vma, info.dst_addr);
                }
 
                cond_resched();
 
                if (unlikely(err == -ENOENT)) {
                        err = info.uffd_ops->failed_do_unlock(ctx, dst_vma,
-                                                             folio, src_addr);
+                                             info.foliop, info.src_addr);
                        if (unlikely(err))
                                goto out;
 
                        goto retry;
                } else {
-                       VM_WARN_ON_ONCE(folio);
+                       VM_WARN_ON_ONCE(info.foliop);
                }
 
                if (!err) {
-                       dst_addr += increment;
-                       src_addr += increment;
+                       info.dst_addr += increment;
+                       info.src_addr += increment;
                        copied += increment;
 
                        if (fatal_signal_pending(current))
@@ -753,8 +743,8 @@ out_unlock:
        up_read(&ctx->map_changing_lock);
        uffd_mfill_unlock(dst_vma);
 out:
-       if (folio)
-               folio_put(folio);
+       if (info.foliop)
+               folio_put(info.foliop);
        VM_WARN_ON_ONCE(copied < 0);
        VM_WARN_ON_ONCE(err > 0);
        VM_WARN_ON_ONCE(!copied && !err);
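
Closing the loop on the caller side, here is a minimal userspace sketch
(again a mock under stated assumptions: a hypothetical mock_copy and an
illustrative 4096-byte increment) of the mfill_atomic() shape after this
patch. The cursors live in the uffd_info and advance by info.increment on
success, and any retry folio left in info.foliop is dropped once the loop
exits.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct folio { int unused; };

struct uffd_info {
	unsigned long dst_addr, src_addr, len;
	unsigned long increment;	/* size of each operation */
	struct folio *foliop;		/* retry folio, may outlive an iteration */
};

/* Hypothetical stand-in for info.uffd_ops->copy(&info). */
static int mock_copy(struct uffd_info *info)
{
	printf("copy dst=%#lx src=%#lx\n", info->dst_addr, info->src_addr);
	return 0;
}

int main(void)
{
	struct uffd_info info = {
		.dst_addr = 0x7000UL, .src_addr = 0x8000UL,
		.len = 3 * 4096UL, .increment = 4096UL, .foliop = NULL,
	};
	unsigned long src_start = info.src_addr;
	int err = 0;

	while (info.src_addr < src_start + info.len) {
		err = mock_copy(&info);
		if (err)
			break;
		/* Success: advance both cursors by one operation. */
		info.dst_addr += info.increment;
		info.src_addr += info.increment;
	}
	/* In the kernel, a leftover info.foliop gets folio_put() here. */
	return err;
}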