www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/userfaultfd: Move hugetlb unlocks into hugetlb_mfill_atomic_pte()
author		Liam R. Howlett <Liam.Howlett@oracle.com>
		Thu, 23 Oct 2025 19:10:04 +0000 (15:10 -0400)
committer	Liam R. Howlett <Liam.Howlett@oracle.com>
		Thu, 30 Oct 2025 16:18:52 +0000 (12:18 -0400)
Instead of dropping the hugetlb locks in the caller (the mfill_atomic() loop
that drives uffd_ops->increment()), move the unlocks into
hugetlb_mfill_atomic_pte() itself.  This moves the memory types closer to
using the same mfill_atomic() implementation.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
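For readers following the series, a minimal userspace C sketch of the locking
contract this commit adopts is shown below.  It is purely illustrative: a
pthread mutex stands in for the hugetlb fault mutex and the hugetlb VMA lock,
and none of the names (fill_one(), fault_mutex) are kernel APIs.  The point is
the contract change: the caller still takes the lock, but every exit path of
the callee is now responsible for dropping it.

/*
 * Illustrative sketch only (not kernel code): the caller used to unlock
 * after the callee returned; now the callee drops the lock on every exit
 * path.  pthread_mutex_t stands in for the hugetlb fault mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fault_mutex = PTHREAD_MUTEX_INITIALIZER;

/* New-style helper: unlocks on all return paths, like hugetlb_mfill_atomic_pte(). */
static int fill_one(int have_existing, pthread_mutex_t *lock)
{
	int ret;

	if (have_existing) {
		ret = -1;		/* analogous to the -EEXIST case */
		goto out;
	}

	/* ... do the actual work while the lock is held ... */
	ret = 0;
out:
	pthread_mutex_unlock(lock);	/* unlock moved into the callee */
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&fault_mutex);
	/* The caller no longer unlocks here; fill_one() owns the unlock. */
	printf("fill_one() returned %d\n", fill_one(0, &fault_mutex));
	return 0;
}

Pushing the unlocks into the callee means the mfill loop no longer needs a
hugetlb-specific cleanup step after each iteration, which is what lets the
memory types converge on a single mfill_atomic() implementation.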
include/linux/hugetlb.h
mm/hugetlb.c
mm/userfaultfd.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2387513d6ae539a0305bdc37fa639c0ee0c5c5e1..6f2f68c73d07f6c27851ca0eb164a65249b1d964 100644
@@ -147,7 +147,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
-                            struct folio **foliop);
+                            struct folio **foliop, u32 hash);
 #endif /* CONFIG_USERFAULTFD */
 long hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_desc *desc, vm_flags_t vm_flags);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f27ce58bf6cc59181405beda259e004e143386e2..d59975c1e101247c5271940d5a47552c7b5c34eb 100644
@@ -6955,7 +6955,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
-                            struct folio **foliop)
+                            struct folio **foliop,
+                            u32 hash)
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
@@ -6974,10 +6975,11 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
        if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
                ptl = huge_pte_lock(h, dst_mm, dst_pte);
 
+               ret = -EEXIST;
                /* Don't overwrite any existing PTEs (even markers) */
                if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
                        spin_unlock(ptl);
-                       return -EEXIST;
+                       goto out;
                }
 
                _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
@@ -6987,7 +6989,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
                spin_unlock(ptl);
-               return 0;
+               ret = 0;
+               goto out;
        }
 
        if (is_continue) {
@@ -7155,6 +7158,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                folio_unlock(folio);
        ret = 0;
 out:
+       hugetlb_vma_unlock_read(dst_vma);
+       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        return ret;
 out_release_unlock:
        spin_unlock(ptl);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index cf1722a62ae20462d25e44ac2efbc655c86f4b8e..346b7b69b29e3e52be9f9ab0af0b1ef7eff349c3 100644
@@ -634,10 +634,8 @@ retry:
                }
 
                err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
-                                              src_addr, flags, &folio);
+                                              src_addr, flags, &folio, hash);
 
-               hugetlb_vma_unlock_read(dst_vma);
-               mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
                cond_resched();