ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
unsigned long src_start, unsigned long len, bool wp);
-extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
- unsigned long dst_start,
- unsigned long len);
+ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
+ unsigned long dst_start, unsigned long len);
extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
unsigned long len, uffd_flags_t flags);
extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
				   unsigned long len, uffd_flags_t flags);
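
For orientation, the hunks below read and write several uffd_info fields. This
is a hedged sketch of the shape those uses imply — the field names and the
UFFD_STRUCT_INIT argument order (dst, src, len, ctx, wp) are taken from the
diff itself, but the layout, types, and everything else are assumptions, not
the actual definitions:

/* Illustrative only: inferred from usage in this diff, not from the tree. */
struct userfaultfd_ctx;
struct vm_uffd_ops;			/* assumed name; provides ->zeropage */

struct uffd_info {
	unsigned long src_addr, src_last;	/* source cursor and end */
	unsigned long dst_addr, dst_last;	/* destination cursor and end */
	ssize_t copied;				/* bytes filled so far */
	ssize_t (*op)(struct uffd_info *info);	/* per-iteration fill op */
	const struct vm_uffd_ops *uffd_ops;	/* set up by mfill_setup() */
	struct userfaultfd_ctx *ctx;
	bool wp;				/* write-protect mode */
};

#define UFFD_STRUCT_INIT(dst, src, len, ctx_, wp_) {	\
	.dst_addr = (dst), .dst_last = (dst) + (len),	\
	.src_addr = (src), .src_last = (src) + (len),	\
	.ctx = (ctx_), .wp = (wp_),			\
}
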
static __always_inline ssize_t mfill_loop(struct uffd_info *info)
{
- ssize_t err = 0;
+ ssize_t err = -EINVAL;
+
+ if (!info->op)
+ goto out_unlock;
+ err = 0;
while (!err && (info->src_addr < info->src_last)) {
VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
err = info->op(info);
}
+out_unlock:
uffd_ctx_unlock(info);
return err;
}
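
The loop above only dispatches info->op until it returns an error or the
source cursor reaches src_last, so each op must make forward progress itself.
A hedged sketch of that contract, using a hypothetical op (the name and the
PAGE_SIZE stepping are assumptions, not the patch's code):

/* Illustrative only: one iteration of a fill operation. */
static ssize_t example_fill_op(struct uffd_info *info)
{
	/* ... install one page at info->dst_addr here ... */

	info->dst_addr += PAGE_SIZE;	/* advance both cursors so the */
	info->src_addr += PAGE_SIZE;	/* while condition terminates   */
	info->copied += PAGE_SIZE;	/* report progress to the caller */
	return 0;			/* a negative errno stops mfill_loop */
}
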
ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
- unsigned long start,
- unsigned long len)
+ unsigned long dst_start, unsigned long len)
{
- return mfill_atomic(ctx, start, 0, len,
- uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
+ ssize_t err;
+ struct uffd_info info = UFFD_STRUCT_INIT(dst_start, 0, len, ctx,
+ false);
+
+ mfill_atomic_validate(&info);
+ err = mfill_setup(&info);
+ if (err)
+ goto out;
+
+ info.op = info.uffd_ops->zeropage;
+ err = mfill_loop(&info);
+
+out:
+ VM_WARN_ON_ONCE(info.copied < 0);
+ VM_WARN_ON_ONCE(err > 0);
+ VM_WARN_ON_ONCE(!info.copied && !err);
+ return info.copied ? info.copied : err;
}
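
From userspace, this path is reached through the UFFDIO_ZEROPAGE ioctl, whose
UAPI in <linux/userfaultfd.h> is stable. A minimal caller sketch; the helper
name is made up and error handling is trimmed:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Zero-fill a registered range; uffd is a userfaultfd file descriptor. */
static int zeropage_range(int uffd, unsigned long dst, unsigned long len)
{
	struct uffdio_zeropage zp = {
		.range = { .start = dst, .len = len },
		.mode = 0,	/* or UFFDIO_ZEROPAGE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
		return -1;
	/* zp.zeropage holds the bytes filled, or a negative errno. */
	return zp.zeropage == (__s64)len ? 0 : -1;
}
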
ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,