mm/userfaultfd: Move hugetlb dst_pte setup to a new function
author		Liam R. Howlett <Liam.Howlett@oracle.com>
		Thu, 23 Oct 2025 19:10:50 +0000 (15:10 -0400)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Thu, 30 Oct 2025 16:20:16 +0000 (12:20 -0400)
Create hugetlb_mfill_prepare() in mm/hugetlb.c and move the hugetlb
dst_pte setup code out of mfill_atomic_hugetlb() in mm/userfaultfd.c.

This is a step toward helping reviewers see that mfill_atomic() can be
used for all memory types.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
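
For reference, a sketch of the resulting caller pattern (illustrative
only; the names match the patch below, and the real loop's retry and
cleanup paths are elided).  Note that hugetlb_mfill_prepare() drops
both the fault mutex and the hugetlb vma read lock itself before
returning an error, so the caller can bail out directly:

	/*
	 * Illustrative caller sketch, not kernel source: on failure
	 * hugetlb_mfill_prepare() has already released the fault mutex
	 * and the vma read lock; on success both locks are held when
	 * the per-pte copy is attempted.
	 */
	err = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
				    &dst_pte, &hash, flags);
	if (err)
		goto out_unlock;

	err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
				       src_addr, flags, &folio, hash);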
include/linux/hugetlb.h
mm/hugetlb.c
mm/userfaultfd.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6f2f68c73d07f6c27851ca0eb164a65249b1d964..14701bcd691e76829b8b41e92695f7b6dd3feb9e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -142,6 +142,8 @@ unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
 #ifdef CONFIG_USERFAULTFD
+ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
+               unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags);
 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d59975c1e101247c5271940d5a47552c7b5c34eb..35dfc04b352a77cab93f2dfcd6e9c9cddf5ce63f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6903,6 +6903,42 @@ out_mutex:
 }
 
 #ifdef CONFIG_USERFAULTFD
+
+ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
+               unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags)
+{
+       struct mm_struct *dst_mm = dst_vma->vm_mm;
+       struct address_space *mapping;
+       pgoff_t idx;
+       /*
+        * Serialize via vma_lock and hugetlb_fault_mutex.
+        * vma_lock ensures the dst_pte remains valid even
+        * in the case of shared pmds.  fault mutex prevents
+        * races with other faulting threads.
+        */
+       idx = linear_page_index(dst_vma, dst_addr);
+       mapping = dst_vma->vm_file->f_mapping;
+       *hash = hugetlb_fault_mutex_hash(mapping, idx);
+       mutex_lock(&hugetlb_fault_mutex_table[*hash]);
+       hugetlb_vma_lock_read(dst_vma);
+
+       *dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, increment);
+       if (!*dst_pte) {
+               hugetlb_vma_unlock_read(dst_vma);
+               mutex_unlock(&hugetlb_fault_mutex_table[*hash]);
+               return -ENOMEM;
+       }
+
+       if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
+           !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, *dst_pte))) {
+               hugetlb_vma_unlock_read(dst_vma);
+               mutex_unlock(&hugetlb_fault_mutex_table[*hash]);
+               return -EEXIST;
+       }
+
+       return 0;
+}
+
 /*
  * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
  */
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 346b7b69b29e3e52be9f9ab0af0b1ef7eff349c3..82b1046c903705e07f2da1ebf591ab0c13dd158a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -558,9 +558,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
        unsigned long src_addr, dst_addr;
        long copied;
        struct folio *folio;
-       pgoff_t idx;
        u32 hash;
-       struct address_space *mapping;
        const struct vm_uffd_ops *uffd_ops;
        unsigned long increment;
 
@@ -605,33 +603,10 @@ retry:
        while (src_addr < src_start + len) {
                VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
 
-               /*
-                * Serialize via vma_lock and hugetlb_fault_mutex.
-                * vma_lock ensures the dst_pte remains valid even
-                * in the case of shared pmds.  fault mutex prevents
-                * races with other faulting threads.
-                */
-               idx = linear_page_index(dst_vma, dst_addr);
-               mapping = dst_vma->vm_file->f_mapping;
-               hash = hugetlb_fault_mutex_hash(mapping, idx);
-               mutex_lock(&hugetlb_fault_mutex_table[hash]);
-               hugetlb_vma_lock_read(dst_vma);
-
-               err = -ENOMEM;
-               dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, increment);
-               if (!dst_pte) {
-                       hugetlb_vma_unlock_read(dst_vma);
-                       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                       goto out_unlock;
-               }
-
-               if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
-                   !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
-                       err = -EEXIST;
-                       hugetlb_vma_unlock_read(dst_vma);
-                       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+               err = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
+                                           &dst_pte, &hash, flags);
+               if (err)
                        goto out_unlock;
-               }
 
                err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
                                               src_addr, flags, &folio, hash);