/*
 * NOTE(review): this is a unified-diff fragment ('-'/'+' markers), not
 * compilable C.  The function body between the local declarations and the
 * "return -ENOMEM;" below is missing from this view (presumably the
 * huge_pte_alloc()/fault-mutex/vma-lock setup), so only the visible hunks
 * are reviewed here.
 */
static inline
ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
/*
 * Parameter change under review: the uffd_flags_t is dropped in favor of a
 * single bool.  See the NOTE on the condition hunk below — the bool the
 * callers pass is the WP bit, which does not textually match the predicate
 * it replaces.
 */
- unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags)
+ unsigned long increment, pte_t **dst_pte, u32 *hash, bool wp)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
struct address_space *mapping;
/* Truncated hunk: the allocation/locking that can fail precedes this path. */
return -ENOMEM;
}
/*
 * NOTE(review): the '-' line tested the MFILL_ATOMIC_CONTINUE *mode*; the
 * '+' line tests "!wp", and the call sites pass the MFILL_ATOMIC_WP bit
 * (see "flags & MFILL_ATOMIC_WP" / "wp_enabled" in the caller hunks).
 * CONTINUE-mode and WP are distinct predicates in the uffd flags scheme,
 * so unless WP is guaranteed to coincide with CONTINUE here, this changes
 * which requests take the present-PTE early-exit below.  The new parameter
 * should presumably carry uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)
 * (e.g. "bool is_continue") instead — confirm against upstream
 * hugetlb_mfill_atomic_pte().
 */
- if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
+ if (!wp &&
!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, *dst_pte))) {
/* Drop the vma read lock and fault mutex before bailing out. */
hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[*hash]);
/*
 * Call site 1 (enclosing function not visible in this chunk): the new bool
 * argument is derived from the WP flag bit.
 * NOTE(review): if the prepare helper's bool replaces the old
 * uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) test (see the '-' line
 * in the helper's condition hunk), this should pass that mode predicate,
 * not "flags & MFILL_ATOMIC_WP" — verify intent before merging.
 */
u32 hash;
ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
- &dst_pte, &hash, flags);
+ &dst_pte, &hash, flags & MFILL_ATOMIC_WP);
if (ret)
return ret;
/*
 * Call site 2 (enclosing function not visible in this chunk): passes a
 * precomputed "wp_enabled" bool.
 * NOTE(review): same concern as the other call site — the helper's bool
 * textually replaces the old MFILL_ATOMIC_CONTINUE mode test, yet this
 * caller feeds it the WP state.  Confirm whether WP and CONTINUE coincide
 * on this path, or whether a "continue" predicate should be passed instead.
 */
u32 hash;
ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
- &dst_pte, &hash, flags);
+ &dst_pte, &hash, wp_enabled);
if (ret)
return ret;