*/
static inline ssize_t
uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx,
- struct vm_area_struct *dst_vma, unsigned long dst_start,
- unsigned long len, uffd_flags_t flags)
+ struct uffd_info *info)
{
- const struct vm_uffd_ops *uffd_ops;
-
/*
 * If memory mappings are changing because of non-cooperative
 * operation (e.g. mremap) running in parallel, bail out and
 * request the user to retry later.
 */
down_read(&ctx->map_changing_lock);
if (atomic_read(&ctx->mmap_changing))
	return -EAGAIN;

/*
 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
 * it will overwrite vm_ops, so vma_is_anonymous must return false.
 */
- if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
- dst_vma->vm_flags & VM_SHARED))
+ if (WARN_ON_ONCE(vma_is_anonymous(info->dst_vma) &&
+ info->dst_vma->vm_flags & VM_SHARED))
return -EINVAL;
/*
* validate 'mode' now that we know the dst_vma: don't allow
* a wrprotect copy if the userfaultfd didn't register as WP.
*/
- if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
+ if (info->wp && !(info->dst_vma->vm_flags & VM_UFFD_WP))
return -EINVAL;
- uffd_ops = vma_get_uffd_ops(dst_vma);
- WARN_ON_ONCE(!uffd_ops || !uffd_ops->is_dst_valid);
-
- /*
- * There is no default zero huge page for all huge page sizes as
- * supported by hugetlb. A PMD_SIZE huge pages may exist as used
- * by THP. Since we can not reliably insert a zero page, this
- * feature is not supported.
- */
- if (!uffd_ops->zeropage &&
- uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE))
- return -EINVAL;
+ WARN_ON_ONCE(!info->uffd_ops->is_dst_valid);
- return uffd_ops->is_dst_valid(dst_vma, dst_start, len);
+ return info->uffd_ops->is_dst_valid(info->dst_vma, info->dst_addr,
+ info->len);
}
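The conversion above leans on a struct uffd_info that bundles the
operation's parameters together with a per-vma-type struct vm_uffd_ops
callback table. A minimal sketch of both shapes, inferred purely from the
call sites in this patch; field names, ordering, and return types are
assumptions here, not the series' actual header definitions:

/*
 * Sketch only: inferred from the call sites in this patch, not the
 * authoritative definitions from the vm_uffd_ops series.
 */
struct uffd_info {
	struct vm_area_struct *dst_vma;		/* locked destination vma */
	unsigned long dst_addr;			/* start of the dst range */
	unsigned long src_addr;			/* start of the src range */
	unsigned long len;			/* total length of the fill */
	unsigned long increment;		/* step: PAGE_SIZE or huge page size */
	bool wp;				/* MFILL_ATOMIC_WP was requested */
	const struct vm_uffd_ops *uffd_ops;	/* per-vma-type callbacks */
};

struct vm_uffd_ops {
	/* Validate [dst_addr, dst_addr + len) as a fill target. */
	ssize_t (*is_dst_valid)(struct vm_area_struct *dst_vma,
				unsigned long dst_addr, unsigned long len);
	/* Step size for this vma type (e.g. the vma's huge page size). */
	unsigned long (*increment)(struct vm_area_struct *dst_vma);
	ssize_t (*copy)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
			unsigned long src_addr, bool wp,
			struct folio **foliop, unsigned long increment);
	ssize_t (*zeropage)(struct vm_area_struct *dst_vma,
			    unsigned long dst_addr);
	ssize_t (*cont)(struct vm_area_struct *dst_vma, unsigned long dst_addr,
			bool wp, unsigned long increment);
	ssize_t (*poison)(struct vm_area_struct *dst_vma,
			  unsigned long dst_addr, unsigned long increment);
	/* Assumed: drop locks on -ENOENT so the caller can retry. */
	ssize_t (*failed_do_unlock)(struct userfaultfd_ctx *ctx,
				    struct vm_area_struct *dst_vma,
				    struct folio *folio,
				    unsigned long src_addr);
};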
ssize_t
unsigned long src_addr, dst_addr;
long copied;
struct folio *folio;
- const struct vm_uffd_ops *uffd_ops;
unsigned long increment;
struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len,
flags & MFILL_ATOMIC_WP);
goto out;
dst_vma = info.dst_vma;
- uffd_ops = vma_get_uffd_ops(dst_vma);
- if (WARN_ON_ONCE(!uffd_ops->increment)) {
+ info.uffd_ops = vma_get_uffd_ops(dst_vma);
+ if (WARN_ON_ONCE(!info.uffd_ops->increment)) {
increment = PAGE_SIZE;
} else {
- increment = uffd_ops->increment(dst_vma);
+ increment = info.uffd_ops->increment(dst_vma);
}
- err = uffd_ctx_lock_and_validate_dst(ctx, dst_vma, dst_start, len,
- flags);
+ err = uffd_ctx_lock_and_validate_dst(ctx, &info);
if (err)
goto out_unlock;
err = -EINVAL;
- if (!uffd_ops->cont &&
+ if (!info.uffd_ops->cont &&
uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
goto out_unlock;
+ /*
+ * There is no default zero huge page for all the huge page sizes
+ * supported by hugetlb. A PMD_SIZE huge page may exist, as used
+ * by THP. Since we cannot reliably insert a zero page, this
+ * feature is not supported.
+ */
+ */
+ if (!info.uffd_ops->zeropage &&
+ uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE))
+ goto out_unlock;
+
info.increment = increment;
while (src_addr < src_start + len) {
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
* tables under us; pte_offset_map_lock() will deal with that.
*/
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
- err = uffd_ops->poison(dst_vma, dst_addr, increment);
+ err = info.uffd_ops->poison(dst_vma, dst_addr,
+ increment);
} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
- err = uffd_ops->cont(dst_vma, dst_addr,
+ err = info.uffd_ops->cont(dst_vma, dst_addr,
flags & MFILL_ATOMIC_WP,
increment);
} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
- err = uffd_ops->copy(dst_vma, dst_addr, src_addr,
+ err = info.uffd_ops->copy(dst_vma, dst_addr, src_addr,
flags & MFILL_ATOMIC_WP,
&folio, increment);
/*
* the radix tree.
*/
} else {
- err = uffd_ops->zeropage(dst_vma, dst_addr);
+ err = info.uffd_ops->zeropage(dst_vma, dst_addr);
}
cond_resched();
if (unlikely(err == -ENOENT)) {
- err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio,
- src_addr);
+ err = info.uffd_ops->failed_do_unlock(ctx, dst_vma,
+ folio, src_addr);
if (unlikely(err))
goto out;
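Under this split, each memory type supplies its own ops table, and the
NULL-callback checks in the setup code above define which modes a type
opts out of: a NULL ->cont rejects MFILL_ATOMIC_CONTINUE with -EINVAL, a
NULL ->zeropage rejects MFILL_ATOMIC_ZEROPAGE, and a NULL ->increment
trips the WARN_ON_ONCE before falling back to PAGE_SIZE. A hypothetical
table for a type that supports copy, zeropage, and poison but not
CONTINUE; every function name here is invented for illustration:

/*
 * Hypothetical example only: shows how the NULL-callback checks in the
 * setup code are meant to behave. These handlers do not exist upstream.
 */
static const struct vm_uffd_ops example_uffd_ops = {
	.is_dst_valid	= example_is_dst_valid,
	.increment	= example_increment,	/* NULL would trip the WARN_ON_ONCE */
	.copy		= example_mfill_copy,
	.zeropage	= example_mfill_zeropage,
	/* .cont = NULL: MFILL_ATOMIC_CONTINUE returns -EINVAL */
	.poison		= example_mfill_poison,
	.failed_do_unlock = example_failed_do_unlock,
};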