www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Bring hugetlb into the mfill_atomic() loop
author Liam R. Howlett <Liam.Howlett@oracle.com>
Mon, 27 Oct 2025 20:01:23 +0000 (16:01 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:36:22 +0000 (12:36 -0400)
Since all the hugetlb setup is now abstracted away, avoid branching off
to the hugetlb path until inside the loop.  The mfill_atomic() loop will
now handle hugetlb as its own entity for all mfill operations.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/userfaultfd.c

index 94e57865c0436a5c6da77dfb1c16f650f1b6359a..a62c0a05cdfa98c74969142cad4de526309b1dce 100644 (file)
@@ -590,109 +590,6 @@ uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx,
        return uffd_ops->is_dst_valid(dst_vma, dst_start, len);
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-/*
- * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
- * called with either vma-lock or mmap_lock held, it will release the lock
- * before returning.
- */
-static __always_inline ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
-                                             struct vm_area_struct *dst_vma,
-                                             unsigned long dst_start,
-                                             unsigned long src_start,
-                                             unsigned long len,
-                                             uffd_flags_t flags)
-{
-       struct mm_struct *dst_mm = dst_vma->vm_mm;
-       ssize_t err;
-       unsigned long src_addr, dst_addr;
-       long copied;
-       struct folio *folio;
-       const struct vm_uffd_ops *uffd_ops;
-       unsigned long increment;
-
-       src_addr = src_start;
-       dst_addr = dst_start;
-       copied = 0;
-       folio = NULL;
-
-retry:
-       /*
-        * On routine entry dst_vma is set.  If we had to drop mmap_lock and
-        * retry, dst_vma will be set to NULL and we must lookup again.
-        */
-       if (!dst_vma) {
-               dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
-               if (IS_ERR(dst_vma)) {
-                       err = PTR_ERR(dst_vma);
-                       goto out;
-               }
-
-               err = uffd_ctx_lock_and_validate_dst(ctx, dst_vma, dst_start,
-                                                    len, flags);
-               if (err)
-                       goto out_unlock;
-       }
-
-       uffd_ops = vma_get_uffd_ops(dst_vma);
-       if (uffd_ops && uffd_ops->increment)
-               increment = uffd_ops->increment(dst_vma);
-
-       while (src_addr < src_start + len) {
-               VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
-
-               err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr, src_addr,
-                                              flags, &folio, increment);
-
-
-               cond_resched();
-
-               if (unlikely(err == -ENOENT)) {
-                       err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio,
-                                                        src_addr);
-                       if (unlikely(err)) {
-                               err = -EFAULT;
-                               goto out;
-                       }
-
-                       dst_vma = NULL;
-                       goto retry;
-               } else
-                       VM_WARN_ON_ONCE(folio);
-
-               if (!err) {
-                       dst_addr += increment;
-                       src_addr += increment;
-                       copied += increment;
-
-                       if (fatal_signal_pending(current))
-                               err = -EINTR;
-               }
-               if (err)
-                       break;
-       }
-
-out_unlock:
-       up_read(&ctx->map_changing_lock);
-       uffd_mfill_unlock(dst_vma);
-out:
-       if (folio)
-               folio_put(folio);
-       VM_WARN_ON_ONCE(copied < 0);
-       VM_WARN_ON_ONCE(err > 0);
-       VM_WARN_ON_ONCE(!copied && !err);
-       return copied ? copied : err;
-}
-#else /* !CONFIG_HUGETLB_PAGE */
-/* fail at build time if gcc attempts to use this */
-extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
-                                   struct vm_area_struct *dst_vma,
-                                   unsigned long dst_start,
-                                   unsigned long src_start,
-                                   unsigned long len,
-                                   uffd_flags_t flags);
-#endif /* CONFIG_HUGETLB_PAGE */
-
 ssize_t
 uffd_get_dst_pmd(struct vm_area_struct *dst_vma, unsigned long dst_addr,
                 pmd_t **dst_pmd)
@@ -776,15 +673,8 @@ retry:
        if (err)
                goto out_unlock;
 
-       /*
-        * If this is a HUGETLB vma, pass off to appropriate routine
-        */
-       if (is_vm_hugetlb_page(dst_vma))
-               return  mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
-                                            src_start, len, flags);
-
        err = -EINVAL;
-       if (!vma_is_shmem(dst_vma) &&
+       if (!(vma_is_shmem(dst_vma) || is_vm_hugetlb_page(dst_vma)) &&
            uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
                goto out_unlock;
 
@@ -795,7 +685,10 @@ retry:
                 * For shmem mappings, khugepaged is allowed to remove page
                 * tables under us; pte_offset_map_lock() will deal with that.
                 */
-               if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+               if (is_vm_hugetlb_page(dst_vma)) {
+                       err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr,
+                                       src_addr, flags, &folio, increment);
+               } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
                        err = mfill_atomic_pte_continue(dst_vma, dst_addr,
                                                        flags);
                } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {