bool folio_in_pagecache = false;
u32 hash;
- if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON))
- return hugetlb_mfill_pte_poison(dst_vma, dst_addr, increment);
-
ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
&dst_pte, &hash, flags);
	if (ret)
		return ret;
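(For context on the second hunk below: poison requests are now routed through the VMA's uffd ops table rather than a dedicated branch per mapping type. A minimal sketch of the callback's shape, inferred purely from the call site uffd_ops->poison(dst_vma, dst_addr, increment); the struct name and layout here are assumptions, not the series' actual definition.)

/*
 * Hypothetical sketch of the ops table; only the poison member is
 * implied by the call site below, and the real definition in this
 * series may carry other members.
 */
struct vm_uffd_ops {
	/*
	 * Install a poison marker at dst_addr; @increment is the step
	 * size for this VMA (PAGE_SIZE, or the huge page size for
	 * hugetlb). Returns 0 on success or a negative errno.
	 */
	int (*poison)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
		      unsigned long increment);
};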
	/*
	 * For shmem mappings, khugepaged is allowed to remove page
	 * tables under us; pte_offset_map_lock() will deal with that.
	 */
- if (is_vm_hugetlb_page(dst_vma)) {
+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+ err = uffd_ops->poison(dst_vma, dst_addr, increment);
+ } else if (is_vm_hugetlb_page(dst_vma)) {
err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr,
src_addr, flags, &folio, increment);
} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
err = mfill_atomic_pte_continue(dst_vma, dst_addr,
flags);
- } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
- err = mfill_atomic_pte_poison(dst_vma, dst_addr, flags);
} else if (!(dst_vma->vm_flags & VM_SHARED)) {
/*
* The normal page fault path for a shmem will invoke