mm/userfaultfd: Use hugetlb uffd_ops for poison
author     Liam R. Howlett <Liam.Howlett@oracle.com>
           Fri, 24 Oct 2025 03:01:03 +0000 (23:01 -0400)
committer  Liam R. Howlett <Liam.Howlett@oracle.com>
           Thu, 30 Oct 2025 16:39:55 +0000 (12:39 -0400)
Move the hugetlb branch in mfill_atomic() below the MFILL_ATOMIC_POISON
check so that the uffd_ops poison function pointer is used for all memory
types, and drop the now-redundant poison special case from
hugetlb_mfill_atomic_pte().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
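
For illustration, a minimal standalone sketch of the dispatch pattern this
commit relies on: each memory type supplies an ops table whose poison callback
is invoked before any type-specific branch. The uffd_ops name and the
->poison() call come from the diff below; the struct layout, handler names and
wiring here are assumptions for illustration only, not the in-tree definitions.

/* Illustrative only: a userspace-compilable sketch, not kernel code. */
#include <stdio.h>

struct vma;                              /* stand-in for vm_area_struct */

struct uffd_ops {
	int (*poison)(struct vma *vma, unsigned long addr, unsigned long len);
};

/* Hypothetical per-type handlers, standing in for the hugetlb and default
 * poison paths named in the diff below. */
static int hugetlb_poison(struct vma *vma, unsigned long addr, unsigned long len)
{
	printf("hugetlb poison at %#lx (+%lu)\n", addr, len);
	return 0;
}

static int default_poison(struct vma *vma, unsigned long addr, unsigned long len)
{
	printf("default poison at %#lx (+%lu)\n", addr, len);
	return 0;
}

static const struct uffd_ops hugetlb_uffd_ops = { .poison = hugetlb_poison };
static const struct uffd_ops default_uffd_ops = { .poison = default_poison };

/* The caller dispatches through the ops table and needs no hugetlb special
 * case for poison, which is what moving the branch in mfill_atomic() buys. */
static int mfill_poison(const struct uffd_ops *ops, struct vma *vma,
			unsigned long addr, unsigned long len)
{
	return ops->poison(vma, addr, len);
}

int main(void)
{
	mfill_poison(&hugetlb_uffd_ops, NULL, 0x200000, 0x200000);
	mfill_poison(&default_uffd_ops, NULL, 0x1000, 0x1000);
	return 0;
}

Compiling and running the sketch prints one line per call, showing that the
caller never branches on the memory type itself once the ops table is chosen.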
mm/hugetlb.c
mm/userfaultfd.c

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 42cb8e5c32ca6ecf73b738bc8ef9ecc6bb12264a..4339a31bac3b74c5e5fc09985bfd99c1dfb9782d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7073,9 +7073,6 @@ int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
        bool folio_in_pagecache = false;
        u32 hash;
 
-       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON))
-               return hugetlb_mfill_pte_poison(dst_vma, dst_addr, increment);
-
        ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
                                    &dst_pte, &hash, flags);
        if (ret)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index fd89aebd86b0f4799de4e13219b6c794521d76d0..0891c5dee2d44b0311342af5ce9d330308445e67 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -685,14 +685,14 @@ retry:
                 * For shmem mappings, khugepaged is allowed to remove page
                 * tables under us; pte_offset_map_lock() will deal with that.
                 */
-               if (is_vm_hugetlb_page(dst_vma)) {
+               if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+                       err = uffd_ops->poison(dst_vma, dst_addr, increment);
+               } else if (is_vm_hugetlb_page(dst_vma)) {
                        err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr,
                                        src_addr, flags, &folio, increment);
                } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
                        err = mfill_atomic_pte_continue(dst_vma, dst_addr,
                                                        flags);
-               } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
-                       err = mfill_atomic_pte_poison(dst_vma, dst_addr, flags);
                } else if (!(dst_vma->vm_flags & VM_SHARED)) {
                        /*
                         * The normal page fault path for a shmem will invoke