vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
-ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
- unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags);
-int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- uffd_flags_t flags,
- struct folio **foliop, u32 hash);
+int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, unsigned long src_addr,
+			uffd_flags_t flags, struct folio **foliop, unsigned long increment);
#endif /* CONFIG_USERFAULTFD */
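
With the prepare step folded into hugetlb_mfill_atomic_pte(), a caller only tracks the per-huge-page increment. A minimal, hypothetical sketch of the resulting call pattern (illustrative only, not part of the patch):

	struct folio *folio = NULL;
	int err;

	/* dst_pte and the fault-mutex hash are now handled internally. */
	err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr, src_addr,
				       flags, &folio, increment);
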
long hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_desc *desc, vm_flags_t vm_flags);
#ifdef CONFIG_USERFAULTFD
+static inline
ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags)
{
* Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
* with modifications for hugetlb pages.
*/
-int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
+int hugetlb_mfill_atomic_pte(struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
struct folio **foliop,
- u32 hash)
+ unsigned long increment)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
unsigned long size = huge_page_size(h);
int vm_shared = dst_vma->vm_flags & VM_SHARED;
pte_t _dst_pte;
+ pte_t *dst_pte;
spinlock_t *ptl;
int ret = -ENOMEM;
struct folio *folio;
bool folio_in_pagecache = false;
+ u32 hash;
+
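+	/*
+	 * Resolve the destination pte and the fault-mutex hash for dst_addr
+	 * here instead of in the caller; bail out if the lookup fails.
+	 */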
+ ret = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
+ &dst_pte, &hash, flags);
+ if (ret)
+ return ret;
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
ptl = huge_pte_lock(h, dst_mm, dst_pte);
* called with either vma-lock or mmap_lock held, it will release the lock
* before returning.
*/
-static __always_inline ssize_t mfill_atomic_hugetlb(
- struct userfaultfd_ctx *ctx,
+static __always_inline ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
ssize_t err;
- pte_t *dst_pte;
unsigned long src_addr, dst_addr;
long copied;
struct folio *folio;
- u32 hash;
const struct vm_uffd_ops *uffd_ops;
unsigned long increment;
while (src_addr < src_start + len) {
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
- err = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
- &dst_pte, &hash, flags);
- if (err)
- goto out_unlock;
-
- err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
- src_addr, flags, &folio, hash);
+ err = hugetlb_mfill_atomic_pte(dst_vma, dst_addr, src_addr,
+ flags, &folio, increment);
cond_resched();