From: Liam R. Howlett Date: Tue, 28 Oct 2025 19:06:36 +0000 (-0400) Subject: mm/userfaultfd: Use uffd_info for uffd_ctx_lock_and_validate_dst() X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=ba76e00461c6986ec4f94553b08bbd46641bc2e0;p=users%2Fjedix%2Flinux-maple.git mm/userfaultfd: Use uffd_info for uffd_ctx_lock_and_validate_dst() uffd_info has all the necessary information for many functions, including uffd_ctx_lock_and_validate_dst(), so just pass the struct around as needed. Signed-off-by: Liam R. Howlett --- diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 931b428dd7ec..9c45223e7d92 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -561,11 +561,8 @@ uffd_failed_do_unlock(struct userfaultfd_ctx *ctx, struct vm_area_struct *dst, */ static inline ssize_t uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx, - struct vm_area_struct *dst_vma, unsigned long dst_start, - unsigned long len, uffd_flags_t flags) + struct uffd_info *info) { - const struct vm_uffd_ops *uffd_ops; - /* * If memory mappings are changing because of non-cooperative * operation (e.g. mremap) running in parallel, bail out and @@ -579,30 +576,20 @@ uffd_ctx_lock_and_validate_dst(struct userfaultfd_ctx *ctx, * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but * it will overwrite vm_ops, so vma_is_anonymous must return false. */ - if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && - dst_vma->vm_flags & VM_SHARED)) + if (WARN_ON_ONCE(vma_is_anonymous(info->dst_vma) && + info->dst_vma->vm_flags & VM_SHARED)) return -EINVAL; /* * validate 'mode' now that we know the dst_vma: don't allow * a wrprotect copy if the userfaultfd didn't register as WP. 
*/ - if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP)) + if (info->wp && !(info->dst_vma->vm_flags & VM_UFFD_WP)) return -EINVAL; - uffd_ops = vma_get_uffd_ops(dst_vma); - WARN_ON_ONCE(!uffd_ops || !uffd_ops->is_dst_valid); - - /* - * There is no default zero huge page for all huge page sizes as - * supported by hugetlb. A PMD_SIZE huge pages may exist as used - * by THP. Since we can not reliably insert a zero page, this - * feature is not supported. - */ - if (!uffd_ops->zeropage && - uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) - return -EINVAL; + WARN_ON_ONCE(!info->uffd_ops->is_dst_valid); - return uffd_ops->is_dst_valid(dst_vma, dst_start, len); + return info->uffd_ops->is_dst_valid(info->dst_vma, info->dst_addr, + info->len); } ssize_t @@ -648,7 +635,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx, unsigned long src_addr, dst_addr; long copied; struct folio *folio; - const struct vm_uffd_ops *uffd_ops; unsigned long increment; struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, flags & MFILL_ATOMIC_WP); @@ -678,23 +664,32 @@ retry: goto out; dst_vma = info.dst_vma; - uffd_ops = vma_get_uffd_ops(dst_vma); - if (WARN_ON_ONCE(!uffd_ops->increment)) { + info.uffd_ops = vma_get_uffd_ops(dst_vma); + if (WARN_ON_ONCE(!info.uffd_ops->increment)) { increment = PAGE_SIZE; } else { - increment = uffd_ops->increment(dst_vma); + increment = info.uffd_ops->increment(dst_vma); } - err = uffd_ctx_lock_and_validate_dst(ctx, dst_vma, dst_start, len, - flags); + err = uffd_ctx_lock_and_validate_dst(ctx, &info); if (err) goto out_unlock; err = -EINVAL; - if (!uffd_ops->cont && + if (!info.uffd_ops->cont && uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) goto out_unlock; + /* + * There is no default zero huge page for all huge page sizes as + * supported by hugetlb. A PMD_SIZE huge pages may exist as used + * by THP. Since we can not reliably insert a zero page, this + * feature is not supported. 
+ */ + if (!info.uffd_ops->zeropage && + uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) + goto out_unlock; + info.increment = increment; while (src_addr < src_start + len) { VM_WARN_ON_ONCE(dst_addr >= dst_start + len); @@ -704,13 +699,14 @@ retry: * tables under us; pte_offset_map_lock() will deal with that. */ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { - err = uffd_ops->poison(dst_vma, dst_addr, increment); + err = info.uffd_ops->poison(dst_vma, dst_addr, + increment); } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) { - err = uffd_ops->cont(dst_vma, dst_addr, + err = info.uffd_ops->cont(dst_vma, dst_addr, flags & MFILL_ATOMIC_WP, increment); } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) { - err = uffd_ops->copy(dst_vma, dst_addr, src_addr, + err = info.uffd_ops->copy(dst_vma, dst_addr, src_addr, flags & MFILL_ATOMIC_WP, &folio, increment); /* @@ -725,14 +721,14 @@ retry: * the radix tree. */ } else { - err = uffd_ops->zeropage(dst_vma, dst_addr); + err = info.uffd_ops->zeropage(dst_vma, dst_addr); } cond_resched(); if (unlikely(err == -ENOENT)) { - err = uffd_ops->failed_do_unlock(ctx, dst_vma, folio, - src_addr); + err = info.uffd_ops->failed_do_unlock(ctx, dst_vma, + folio, src_addr); if (unlikely(err)) goto out;