struct uffdio_copy uffdio_copy;
struct uffdio_copy __user *user_uffdio_copy;
struct userfaultfd_wake_range range;
- uffd_flags_t flags = 0;
+ bool wp = false;
user_uffdio_copy = (struct uffdio_copy __user *) arg;
[...]
if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
goto out;
if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
- flags |= MFILL_ATOMIC_WP;
+ wp = true;
if (mmget_not_zero(ctx->mm)) {
ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
- uffdio_copy.len, flags);
+ uffdio_copy.len, wp);
mmput(ctx->mm);
} else {
return -ESRCH;
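
Nothing changes at the ABI boundary: userspace still requests a write-protected copy through UFFDIO_COPY_MODE_WP, and only the in-kernel plumbing moves from a flags word to a plain boolean. As a hedged sketch of the mode this maps onto (the helper name is invented; uffd is assumed to be a userfaultfd whose range was registered with UFFDIO_REGISTER_MODE_WP):

/* Sketch only: resolve a missing page with a write-protected copy. */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_copy_wp(int uffd, void *src_page, unsigned long dst_addr,
			unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst  = dst_addr,
		.src  = (unsigned long)src_page,
		.len  = page_size,
		.mode = UFFDIO_COPY_MODE_WP,	/* becomes wp == true in the kernel */
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy))
		return -1;	/* partial progress, if any, is reported in copy.copy */
	return 0;
}

The declaration in include/linux/userfaultfd_k.h is adjusted to match, dropping the redundant extern while at it:
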
unsigned long dst_addr, struct page *page,
bool newly_allocated, bool wp);
-extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
- unsigned long src_start, unsigned long len,
- uffd_flags_t flags);
+ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
+ unsigned long src_start, unsigned long len, bool wp);
extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
unsigned long dst_start,
unsigned long len);
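
Over in mm/userfaultfd.c, the generic mfill_loop() sheds the -ENOENT handling, which after this split is only needed by the copy path, the one operation that may have to fault in the userspace source page without the lock held:
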
VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
err = info->op(info);
cond_resched();
- if (unlikely(err == -ENOENT)) {
- err = info->uffd_ops->failed_do_unlock(info);
- if (unlikely(err))
- return err; /* Unlocked already */
-
- return -ENOENT;
- } else {
- VM_WARN_ON_ONCE(info->foliop);
- }
-
if (!err) {
uffd_info_inc(info);
if (fatal_signal_pending(current))
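
For the same reason, mfill_atomic() no longer needs the retry label or the leftover-folio cleanup, since none of the remaining modes carries a folio across an unlock:
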
flags & MFILL_ATOMIC_WP);
mfill_atomic_validate(&info);
-retry:
-
err = mfill_setup(&info);
if (err)
goto out;
[...]
}
err = mfill_loop(&info);
- if (unlikely(err == -ENOENT))
- goto retry;
out:
- if (info.foliop)
- folio_put(info.foliop);
VM_WARN_ON_ONCE(info.copied < 0);
VM_WARN_ON_ONCE(err > 0);
VM_WARN_ON_ONCE(!info.copied && !err);
return info.copied ? info.copied : err;
}
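
The dance removed above reappears, essentially verbatim, in a loop dedicated to the copy case:
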
+static __always_inline ssize_t mfill_copy_loop(struct uffd_info *info)
+{
+ ssize_t err = -EINVAL;
+
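+ /* No per-VMA copy operation was wired up; treat it as invalid. */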
+ if (!info->op)
+ goto out_unlock;
+
+ err = 0;
+ while (!err && (info->src_addr < info->src_last)) {
+ VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
+ err = info->op(info);
+ cond_resched();
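+ /*
+ * -ENOENT means the source page has to be filled in without the
+ * lock held. failed_do_unlock() drops the lock in both outcomes;
+ * on success the -ENOENT is passed up so the caller can retry
+ * from mfill_setup(), with the folio preserved in info->foliop.
+ */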
+ if (unlikely(err == -ENOENT)) {
+ err = info->uffd_ops->failed_do_unlock(info);
+ if (unlikely(err))
+ return err; /* Unlocked already */
+
+ return -ENOENT;
+ } else {
+ VM_WARN_ON_ONCE(info->foliop);
+ }
+
+ if (!err) {
+ uffd_info_inc(info);
+ if (fatal_signal_pending(current))
+ err = -EINTR;
+ }
+ }
+
+out_unlock:
+ uffd_ctx_unlock(info);
+ return err;
+}
+
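
For orientation, here is a reconstructed sketch of the state these helpers share, inferred purely from the accesses above; the field order, the ops-table type name, and anything not referenced in the diff are guesses, not the series' actual definition:

/* Reconstructed for illustration; not the series' real definition. */
struct folio;				/* kernel-internal */
struct uffd_ops;			/* hypothetical name for the ops table */

struct uffd_info {
	unsigned long src_addr, src_last;	/* source cursor and end */
	unsigned long dst_addr, dst_last;	/* destination cursor and end */
	ssize_t copied;				/* bytes filled in so far */
	struct folio *foliop;			/* folio carried across an -ENOENT retry */
	ssize_t (*op)(struct uffd_info *info);	/* per-page fill operation */
	const struct uffd_ops *uffd_ops;	/* per-VMA-type callbacks */
	/* ... plus whatever mfill_setup()/uffd_ctx_unlock() track */
};

With that loop in place, mfill_atomic_copy() stops being a thin wrapper around mfill_atomic() and becomes its own entry point:
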
ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
- unsigned long src_start, unsigned long len,
- uffd_flags_t flags)
+ unsigned long src_start, unsigned long len, bool wp)
{
- return mfill_atomic(ctx, dst_start, src_start, len,
- uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
+ ssize_t err;
+ struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
+ wp);
+
+ mfill_atomic_validate(&info);
+retry:
+
+ err = mfill_setup(&info);
+ if (err)
+ goto out;
+
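+ /* mfill_setup() (re)resolved uffd_ops; (re)select the copy op to match. */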
+ info.op = info.uffd_ops->copy;
+ err = mfill_copy_loop(&info);
+ if (unlikely(err == -ENOENT))
+ goto retry;
+
+out:
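+ /* Drop a folio left behind by a failed or interrupted retry. */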
+ if (info.foliop)
+ folio_put(info.foliop);
+ VM_WARN_ON_ONCE(info.copied < 0);
+ VM_WARN_ON_ONCE(err > 0);
+ VM_WARN_ON_ONCE(!info.copied && !err);
+ return info.copied ? info.copied : err;
}
ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
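
The net effect is a cleaner split: the generic mfill_atomic()/mfill_loop() pair no longer threads hidden retry state through the loop, while the copy path alone owns the stashed-folio, -ENOENT retry it actually needs. Return semantics are unchanged throughout: the number of bytes filled when any progress was made, otherwise the error.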