www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/hugetlb: Extract poison from hugetlb_mfill_atomic_pte()
author: Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 24 Oct 2025 02:53:36 +0000 (22:53 -0400)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Thu, 30 Oct 2025 16:38:00 +0000 (12:38 -0400)
Split out the poison option from hugetlb_mfill_atomic_pte(), with the
long term goal to have a single entry into the call path of
mfill_atomic_pte_poison().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
mm/hugetlb.c

index bfd60f30827240e6748360b3224acbd4892da7ba..47eda0866d52dfc98097d9dceb5246ecff67a6a8 100644 (file)
@@ -7000,6 +7000,48 @@ static ssize_t hugetlb_is_dst_valid(struct vm_area_struct *dst_vma,
 
        return 0;
 }
+
+/*
+ * Install a PTE_MARKER_POISONED marker at @dst_addr for UFFDIO_POISON.
+ *
+ * Returns 0 on success, -EEXIST if any PTE (including a marker) is already
+ * present, or the error from hugetlb_mfill_prepare().  On success the caller
+ * observes a poison marker in place of a present PTE; the fault mutex and
+ * VMA read lock taken by hugetlb_mfill_prepare() are dropped before return.
+ */
+static int hugetlb_mfill_pte_poison(struct vm_area_struct *dst_vma,
+               unsigned long dst_addr, unsigned long increment)
+{
+       struct mm_struct *dst_mm = dst_vma->vm_mm;
+       struct hstate *h = hstate_vma(dst_vma);
+       unsigned long size = huge_page_size(h);
+       pte_t _dst_pte;
+       pte_t *dst_pte;
+       spinlock_t *ptl;
+       int ret;
+       u32 hash;
+
+       ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
+                                   &dst_pte, &hash,
+                                   0 /* Not a continue, no one cares. */);
+       if (ret)
+               return ret;
+
+       ptl = huge_pte_lock(h, dst_mm, dst_pte);
+
+       ret = -EEXIST;
+       /* Don't overwrite any existing PTEs (even markers) */
+       if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
+               spin_unlock(ptl);
+               goto out;
+       }
+
+       _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
+       set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
+
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+       spin_unlock(ptl);
+       ret = 0;
+out:
+       hugetlb_vma_unlock_read(dst_vma);
+       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+       /*
+        * Propagate ret: the -EEXIST path jumps here with ret != 0.
+        * Returning a literal 0 would silently swallow that error (the
+        * pre-extraction inline code fell through to a tail returning ret).
+        */
+       return ret;
+}
+
 /*
  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
  * with modifications for hugetlb pages.
@@ -7027,32 +7069,14 @@ int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
        bool folio_in_pagecache = false;
        u32 hash;
 
+       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON))
+               return hugetlb_mfill_pte_poison(dst_vma, dst_addr, increment);
+
        ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
                                    &dst_pte, &hash, flags);
        if (ret)
                return ret;
 
-       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
-               ptl = huge_pte_lock(h, dst_mm, dst_pte);
-
-               ret = -EEXIST;
-               /* Don't overwrite any existing PTEs (even markers) */
-               if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
-                       spin_unlock(ptl);
-                       goto out;
-               }
-
-               _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
-               set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
-
-               /* No need to invalidate - it was non-present before */
-               update_mmu_cache(dst_vma, dst_addr, dst_pte);
-
-               spin_unlock(ptl);
-               ret = 0;
-               goto out;
-       }
-
        if (is_continue) {
                ret = -EFAULT;
                folio = filemap_lock_hugetlb_folio(h, mapping, idx);