unsigned long dst_addr; /* Target address */
unsigned long src_addr; /* Source address */
unsigned long len; /* Total length to copy */
+ unsigned long src_last; /* Initial src_addr + len; end of the source range */
+ unsigned long dst_last; /* Initial dst_addr + len; end of the destination range */
struct folio *foliop; /* folio pointer for retry */
struct userfaultfd_ctx *ctx; /* The userfaultfd context */
bool wp; /* Operation is requesting write protection */
const struct vm_uffd_ops *uffd_ops; /* The vma uffd_ops pointer */
int (*op)(struct uffd_info *info); /* The operation to perform on the dst */
+ long copied; /* Bytes completed so far; returned to the caller */
};
#define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __ctx, __wp) { \
.dst_addr = (__dst_addr), \
.src_addr = (__src_addr), \
.len = (__len), \
+ .src_last = (__src_addr) + (__len), \
+ .dst_last = (__dst_addr) + (__len), \
.wp = (__wp), \
.ctx = (__ctx), \
.foliop = NULL, \
+ .copied = 0, \
}
void uffd_complete_register(struct vm_area_struct *vma);
uffd_mfill_unlock(info->dst_vma);
}
-static inline unsigned long uffd_info_inc(struct uffd_info *info)
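+/* Advance src/dst by the per-iteration increment and account it in copied. */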
+static inline void uffd_info_inc(struct uffd_info *info)
{
info->dst_addr += info->increment;
info->src_addr += info->increment;
- return info->increment;
+ info->copied += info->increment;
}
ssize_t
return 0;
}
-static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
- unsigned long dst_start,
- unsigned long src_start,
- unsigned long len,
- uffd_flags_t flags)
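+/*
+ * Sanity-check the request before taking any locks: the destination and
+ * length must be page aligned, and neither span may be zero-sized or wrap.
+ */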
+static __always_inline void mfill_atomic_validate(struct uffd_info *info)
{
- ssize_t err;
- long copied;
- struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
- flags & MFILL_ATOMIC_WP);
-
/*
* Sanitize the command parameters:
*/
- VM_WARN_ON_ONCE(dst_start & ~PAGE_MASK);
- VM_WARN_ON_ONCE(len & ~PAGE_MASK);
+ VM_WARN_ON_ONCE(info->dst_addr & ~PAGE_MASK);
+ VM_WARN_ON_ONCE(info->len & ~PAGE_MASK);
/* Does the address range wrap, or is the span zero-sized? */
- VM_WARN_ON_ONCE(src_start + len <= src_start);
- VM_WARN_ON_ONCE(dst_start + len <= dst_start);
-
- copied = 0;
+ VM_WARN_ON_ONCE(info->src_last <= info->src_addr);
+ VM_WARN_ON_ONCE(info->dst_last <= info->dst_addr);
+}
-retry:
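+/*
+ * Take the dst vma and ctx locks and validate the destination range.
+ * Returns 0 with the locks held, or a negative error with everything
+ * unlocked.
+ */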
+static __always_inline ssize_t mfill_setup(struct uffd_info *info)
+{
+ ssize_t err;
/*
* Make sure the vma is not shared, that the dst range is
* both valid and fully within a single existing vma.
*/
- err = uffd_mfill_lock(&info);
+ err = uffd_mfill_lock(info);
if (err)
- goto out;
+ return err;
- err = uffd_ctx_lock_and_validate_dst(&info);
+ err = uffd_ctx_lock_and_validate_dst(info);
if (err)
goto out_unlock;
- err = uffd_info_setup(&info);
+ err = uffd_info_setup(info);
if (err)
goto out_unlock;
- err = uffd_info_set_ops(&info, flags);
- if (err)
- goto out_unlock;
+ return 0;
+
+out_unlock:
+ uffd_ctx_unlock(info);
+ return err;
+}
+
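+/*
+ * Run info->op() over the destination range, advancing on each success.
+ * Returns with the locks dropped; -ENOENT asks the caller to redo the
+ * setup and continue (a folio to carry over may sit in info->foliop).
+ */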
+static __always_inline ssize_t mfill_loop(struct uffd_info *info)
+{
+ ssize_t err = 0;
- while (info.src_addr < src_start + len) {
- VM_WARN_ON_ONCE(info.dst_addr >= dst_start + len);
- err = info.op(&info);
+ while (!err && (info->src_addr < info->src_last)) {
+ VM_WARN_ON_ONCE(info->dst_addr >= info->dst_last);
+ err = info->op(info);
cond_resched();
if (unlikely(err == -ENOENT)) {
- err = info.uffd_ops->failed_do_unlock(&info);
+ err = info->uffd_ops->failed_do_unlock(info);
if (unlikely(err))
- goto out;
+ return err; /* Unlocked already */
- goto retry;
+ return -ENOENT;
- } else {
- VM_WARN_ON_ONCE(info.foliop);
- }
+ }
+ VM_WARN_ON_ONCE(info->foliop);
if (!err) {
- copied += uffd_info_inc(&info);
+ uffd_info_inc(info);
if (fatal_signal_pending(current))
err = -EINTR;
}
- if (err)
- break;
}
-out_unlock:
- uffd_ctx_unlock(&info);
+ uffd_ctx_unlock(info);
+ return err;
+}
+
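+/*
+ * Validate the request, then alternate between setup and the fill loop
+ * until the range is done, an error occurs, or a fatal signal arrives.
+ */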
+static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
+ unsigned long dst_start,
+ unsigned long src_start,
+ unsigned long len,
+ uffd_flags_t flags)
+{
+ ssize_t err;
+ struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len, ctx,
+ flags & MFILL_ATOMIC_WP);
+
+ mfill_atomic_validate(&info);
+
+retry:
+ err = mfill_setup(&info);
+ if (err)
+ goto out;
+
+ err = uffd_info_set_ops(&info, flags);
+ if (err) {
+ uffd_ctx_unlock(&info);
+ goto out;
+ }
+
+ err = mfill_loop(&info);
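+ /* The op dropped the locks mid-range; redo the setup and resume. */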
+ if (unlikely(err == -ENOENT))
+ goto retry;
+
out:
if (info.foliop)
folio_put(info.foliop);
- VM_WARN_ON_ONCE(copied < 0);
+ VM_WARN_ON_ONCE(info.copied < 0);
VM_WARN_ON_ONCE(err > 0);
- VM_WARN_ON_ONCE(!copied && !err);
- return copied ? copied : err;
+ VM_WARN_ON_ONCE(!info.copied && !err);
+ return info.copied ? info.copied : err;
}
ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,