mm/userfaultfd: move hugetlb_mfill_prepare() into hugetlb_mfill_atomic_pte()
author Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 23 Oct 2025 19:22:16 +0000 (15:22 -0400)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:21:40 +0000 (12:21 -0400)
hugetlb_mfill_atomic_pte() is the only user of hugetlb_mfill_prepare(),
so move the call into that function.  This reduces the number of hugetlb
calls exposed to external users.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
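
The shape of the change is the usual one for a helper with a single
caller: fold the prepare step into the callee and drop the helper's
external linkage, so the header exports one symbol fewer.  A minimal,
self-contained sketch of that pattern, using hypothetical names
(prepare()/do_op()) rather than the actual hugetlb APIs:

	#include <stdio.h>

	/* Before the change, prepare() would have external linkage and a
	 * prototype in a header, because every caller of do_op() had to
	 * invoke it first.  After the change it is static to this file. */
	static int prepare(int arg, int *state)
	{
		if (arg < 0)
			return -1;	/* validation failure */
		*state = arg * 2;	/* set up state for the operation */
		return 0;
	}

	int do_op(int arg)
	{
		int state;
		int ret;

		/* The prepare step now lives in its only user, so callers
		 * make one call instead of two. */
		ret = prepare(arg, &state);
		if (ret)
			return ret;

		printf("operating on state %d\n", state);
		return 0;
	}

	int main(void)
	{
		return do_op(21);
	}

The diff below follows the same shape: hugetlb_mfill_prepare() loses
its prototype in include/linux/hugetlb.h and becomes static inline in
mm/hugetlb.c, hugetlb_mfill_atomic_pte() computes dst_pte and hash
itself, and mfill_atomic_hugetlb() now passes the increment instead.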
include/linux/hugetlb.h
mm/hugetlb.c
mm/userfaultfd.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 14701bcd691e76829b8b41e92695f7b6dd3feb9e..c7e315133beba4581315bd7d9794974a9201923e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -142,14 +142,9 @@ unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
 #ifdef CONFIG_USERFAULTFD
-ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
-               unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags);
-int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
-                            struct vm_area_struct *dst_vma,
-                            unsigned long dst_addr,
-                            unsigned long src_addr,
-                            uffd_flags_t flags,
-                            struct folio **foliop, u32 hash);
+int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr, unsigned long src_addr,
+               uffd_flags_t flags, struct folio **foliop, unsigned long increment);
 #endif /* CONFIG_USERFAULTFD */
 long hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_desc *desc, vm_flags_t vm_flags);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 35dfc04b352a77cab93f2dfcd6e9c9cddf5ce63f..c7409c7cee282e734fc667f6d0847268bd15d37f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6904,6 +6904,7 @@ out_mutex:
 
 #ifdef CONFIG_USERFAULTFD
 
+static inline
 ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
                unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags)
 {
@@ -6986,13 +6987,12 @@ static ssize_t hugetlb_is_dst_valid(struct vm_area_struct *dst_vma,
  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
  * with modifications for hugetlb pages.
  */
-int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
-                            struct vm_area_struct *dst_vma,
+int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
                             struct folio **foliop,
-                            u32 hash)
+                            unsigned long increment)
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
@@ -7003,10 +7003,17 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
        unsigned long size = huge_page_size(h);
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
        pte_t _dst_pte;
+       pte_t *dst_pte;
        spinlock_t *ptl;
        int ret = -ENOMEM;
        struct folio *folio;
        bool folio_in_pagecache = false;
+       u32 hash;
+
+       ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
+                                   &dst_pte, &hash, flags);
+       if (ret)
+               return ret;
 
        if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
                ptl = huge_pte_lock(h, dst_mm, dst_pte);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 82b1046c903705e07f2da1ebf591ab0c13dd158a..8134f206c79349ed699ae98ccc412ea79c7f2eb5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -544,8 +544,7 @@ uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx,
  * called with either vma-lock or mmap_lock held, it will release the lock
  * before returning.
  */
-static __always_inline ssize_t mfill_atomic_hugetlb(
-                                             struct userfaultfd_ctx *ctx,
+static __always_inline ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
                                              struct vm_area_struct *dst_vma,
                                              unsigned long dst_start,
                                              unsigned long src_start,
@@ -554,11 +553,9 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        ssize_t err;
-       pte_t *dst_pte;
        unsigned long src_addr, dst_addr;
        long copied;
        struct folio *folio;
-       u32 hash;
        const struct vm_uffd_ops *uffd_ops;
        unsigned long increment;
 
@@ -603,13 +600,8 @@ retry:
        while (src_addr < src_start + len) {
                VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
 
-               err = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
-                                           &dst_pte, &hash, flags);
-               if (err)
-                       goto out_unlock;
-
-               err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
-                                              src_addr, flags, &folio, hash);
+               err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr, src_addr,
+                                              flags, &folio, increment);
 
 
                cond_resched();