struct vm_area_struct *dst_vma;
ssize_t err;
long copied;
- unsigned long increment;
struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len,
flags & MFILL_ATOMIC_WP);
dst_vma = info.dst_vma;
info.uffd_ops = vma_get_uffd_ops(dst_vma);
if (WARN_ON_ONCE(!info.uffd_ops->increment)) {
- increment = PAGE_SIZE;
+ info.increment = PAGE_SIZE;
} else {
- increment = info.uffd_ops->increment(dst_vma);
+ info.increment = info.uffd_ops->increment(dst_vma);
}
err = uffd_ctx_lock_and_validate_dst(ctx, &info);
if (err)
goto out_unlock;
- err = -EINVAL;
- if (!info.uffd_ops->cont &&
- uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
- goto out_unlock;
-
/*
- * There is no default zero huge page for all huge page sizes as
- * supported by hugetlb. A PMD_SIZE huge pages may exist as used
- * by THP. Since we can not reliably insert a zero page, this
- * feature is not supported.
+ * For shmem mappings, khugepaged is allowed to remove page
+ * tables under us; pte_offset_map_lock() will deal with that.
*/
- if (!info.uffd_ops->zeropage &&
- uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE))
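+ /* Resolve the mode-specific operation once, before the copy loop below. */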
+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+ info.op = info.uffd_ops->poison;
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+ info.op = info.uffd_ops->cont;
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+ info.op = info.uffd_ops->copy;
+ /*
+ * The normal page fault path for a shmem will invoke
+ * the fault, fill the hole in the file and COW it right
+ * away. The result generates plain anonymous memory. So
+ * when we are asked to fill a hole in a MAP_PRIVATE
+ * shmem mapping, we'll generate anonymous memory
+ * directly without actually filling the hole. For the
+ * MAP_PRIVATE case the robustness check only happens in
+ * the pagetable (to verify it's still none) and not in
+ * the radix tree.
+ */
+ } else {
+ info.op = info.uffd_ops->zeropage;
+ }
+
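+ /*
+ * A NULL op means the requested mode is not supported for this VMA
+ * type, e.g. hugetlb has no zeropage op since there is no default
+ * zero huge page.
+ */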
+ err = -EINVAL;
+ if (!info.op)
goto out_unlock;
- info.increment = increment;
while (info.src_addr < src_start + len) {
VM_WARN_ON_ONCE(info.dst_addr >= dst_start + len);
-
- /*
- * For shmem mappings, khugepaged is allowed to remove page
- * tables under us; pte_offset_map_lock() will deal with that.
- */
- if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
- err = info.uffd_ops->poison(&info);
- } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
- err = info.uffd_ops->cont(&info);
- } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
- err = info.uffd_ops->copy(&info);
- /*
- * The normal page fault path for a shmem will invoke
- * the fault, fill the hole in the file and COW it right
- * away. The result generates plain anonymous memory. So
- * when we are asked to fill an hole in a MAP_PRIVATE
- * shmem mapping, we'll generate anonymous memory
- * directly without actually filling the hole. For the
- * MAP_PRIVATE case the robustness check only happens in
- * the pagetable (to verify it's still none) and not in
- * the radix tree.
- */
- } else {
- err = info.uffd_ops->zeropage(&info);
- }
-
+ err = info.op(&info);
cond_resched();
-
if (unlikely(err == -ENOENT)) {
err = info.uffd_ops->failed_do_unlock(ctx, dst_vma,
info.foliop, info.src_addr);
}
if (!err) {
- info.dst_addr += increment;
- info.src_addr += increment;
- copied += increment;
-
+ info.dst_addr += info.increment;
+ info.src_addr += info.increment;
+ copied += info.increment;
if (fatal_signal_pending(current))
err = -EINTR;
}