unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
- struct folio **foliop);
+ struct folio **foliop, u32 hash);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_desc *desc, vm_flags_t vm_flags);
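
The header hunk threads a u32 hash through to hugetlb_mfill_atomic_pte(). Presumably this is the same index into hugetlb_fault_mutex_table[] that the caller already computes with hugetlb_fault_mutex_hash() before taking the mutex; handing it down is what lets the callee release that mutex itself at its out: label (see below). As a rough, self-contained sketch of the bucketed-lock idea, with hypothetical userspace names rather than the kernel's:

    #include <pthread.h>
    #include <stdint.h>

    #define FAULT_MUTEX_BUCKETS 64

    /* Hypothetical stand-in for hugetlb_fault_mutex_table[]. */
    static pthread_mutex_t fault_mutex_table[FAULT_MUTEX_BUCKETS] = {
        [0 ... FAULT_MUTEX_BUCKETS - 1] = PTHREAD_MUTEX_INITIALIZER,
    };

    /* Hypothetical stand-in for hugetlb_fault_mutex_hash(): map a page
     * index to a bucket so concurrent fills of the same page serialize. */
    static uint32_t fault_mutex_hash(unsigned long pgoff)
    {
        return (uint32_t)(pgoff % FAULT_MUTEX_BUCKETS);
    }
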
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
- struct folio **foliop)
+ struct folio **foliop,
+ u32 hash)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
ptl = huge_pte_lock(h, dst_mm, dst_pte);
+ ret = -EEXIST;
/* Don't overwrite any existing PTEs (even markers) */
if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
spin_unlock(ptl);
- return -EEXIST;
+ goto out;
}
_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
- return 0;
+ ret = 0;
+ goto out;
}
if (is_continue) {
folio_unlock(folio);
ret = 0;
out:
+ hugetlb_vma_unlock_read(dst_vma);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
return ret;
out_release_unlock:
spin_unlock(ptl);
}
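
Inside hugetlb_mfill_atomic_pte(), the poison branch can no longer return directly once the unlocks live at out:, so the hunk above turns both early returns into the usual set-ret-and-goto-out form; every exit path, success or failure, now drops the VMA read lock and the fault mutex in one place. A minimal, self-contained illustration of that single-exit idiom (hypothetical names, not the kernel code):

    #include <pthread.h>
    #include <errno.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static int slot_used[16];

    /* Single-exit style: every path sets ret and jumps to out, so the
     * unlock is written exactly once and cannot be skipped. */
    static int mark_slot(int idx)
    {
        int ret;

        pthread_mutex_lock(&table_lock);

        ret = -EEXIST;
        if (slot_used[idx])
            goto out;       /* don't overwrite an existing entry */

        slot_used[idx] = 1;
        ret = 0;
    out:
        pthread_mutex_unlock(&table_lock);
        return ret;
    }
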
err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
- src_addr, flags, &folio);
+ src_addr, flags, &folio, hash);
- hugetlb_vma_unlock_read(dst_vma);
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
cond_resched();
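
On the caller side, the two unlock calls vanish from the per-address loop because hugetlb_mfill_atomic_pte() now performs them at out:; locking and unlocking therefore live in different functions, with the hash acting as the hand-off. Continuing the hypothetical fault_mutex_table/fault_mutex_hash sketch from above (not the patch itself), the shape is roughly:

    /* Callee: takes over responsibility for dropping the bucket lock the
     * caller acquired, mirroring the unlocks added at out: in the patch. */
    static int fill_one(unsigned long pgoff, uint32_t hash)
    {
        int ret = 0;

        /* ... per-page work done while the bucket lock is held ... */

        pthread_mutex_unlock(&fault_mutex_table[hash]);  /* unlock moved here */
        return ret;
    }

    /* Caller: still picks the bucket and takes the lock, but no longer
     * unlocks after the call returns. */
    static int fill_range(unsigned long start, unsigned long nr)
    {
        for (unsigned long i = 0; i < nr; i++) {
            uint32_t hash = fault_mutex_hash(start + i);

            pthread_mutex_lock(&fault_mutex_table[hash]);
            int err = fill_one(start + i, hash);
            if (err)
                return err;
        }
        return 0;
    }
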