struct uffdio_continue uffdio_continue;
struct uffdio_continue __user *user_uffdio_continue;
struct userfaultfd_wake_range range;
- uffd_flags_t flags = 0;
+ bool wp = false;
user_uffdio_continue = (struct uffdio_continue __user *)arg;
if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE |
			     UFFDIO_CONTINUE_MODE_WP))
goto out;
if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
- flags |= MFILL_ATOMIC_WP;
+ wp = true;
if (mmget_not_zero(ctx->mm)) {
ret = mfill_atomic_continue(ctx, uffdio_continue.range.start,
- uffdio_continue.range.len, flags);
+ uffdio_continue.range.len, wp);
mmput(ctx->mm);
} else {
return -ESRCH;
unsigned long src_start, unsigned long len, bool wp);
ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
unsigned long dst_start, unsigned long len);
-extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
- unsigned long len, uffd_flags_t flags);
+ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
+ unsigned long dst_start, unsigned long len, bool wp);
extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
unsigned long len, uffd_flags_t flags);
extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
return info.copied ? info.copied : err;
}
-ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
- unsigned long len, uffd_flags_t flags)
+ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx,
+ unsigned long dst_start, unsigned long len, bool wp)
{
+ ssize_t err;
+ struct uffd_info info = UFFD_STRUCT_INIT(dst_start, 0, len, ctx,
+ wp);
/*
* A caller might reasonably assume that UFFDIO_CONTINUE contains an
* smp_wmb() to ensure that any writes to the about-to-be-mapped page by
* the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
* subsequent loads from the page through the newly mapped address range.
*/
smp_wmb();
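+ /* Validate the request and set up the uffd_info state before the fill loop. */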
+ mfill_atomic_validate(&info);
+ err = mfill_setup(&info);
+ if (err)
+ goto out;
- return mfill_atomic(ctx, start, 0, len,
- uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
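+ /* UFFDIO_CONTINUE maps the page already present in the page cache. */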
+ info.op = info.uffd_ops->cont;
+ err = mfill_loop(&info);
+
+out:
+ VM_WARN_ON_ONCE(info.copied < 0);
+ VM_WARN_ON_ONCE(err > 0);
+ VM_WARN_ON_ONCE(!info.copied && !err);
+ return info.copied ? info.copied : err;
}
ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,