return 0;
}
+static inline ssize_t uffd_info_set_ops(struct uffd_info *info,
+ uffd_flags_t flags)
+{
+ /*
+ * For shmem mappings, khugepaged is allowed to remove page
+ * tables under us; pte_offset_map_lock() will deal with that.
+ */
+ if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
+ info->op = info->uffd_ops->poison;
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
+ info->op = info->uffd_ops->cont;
+ } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+ info->op = info->uffd_ops->copy;
+ /*
+ * The normal page fault path for a shmem mapping will
+ * invoke the fault, fill the hole in the file and COW it
+ * right away. The result is plain anonymous memory. So
+ * when we are asked to fill a hole in a MAP_PRIVATE
+ * shmem mapping, we'll generate anonymous memory
+ * directly without actually filling the hole. For the
+ * MAP_PRIVATE case the robustness check only happens in
+ * the pagetable (to verify it's still none) and not in
+ * the radix tree.
+ */
+ } else {
+ info->op = info->uffd_ops->zeropage;
+ }
+
+ if (!info->op)
+ return -EINVAL;
+
+ return 0;
+}
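For reference, here is a minimal sketch of the ops table these helpers dispatch through, reconstructed only from the callbacks referenced above; the struct name, parameter lists, and return types are assumptions, not taken from this series:

struct vm_uffd_ops {	/* hypothetical layout */
	/* step size per filled page, e.g. PAGE_SIZE or a huge page size */
	unsigned long (*increment)(struct vm_area_struct *dst_vma);
	/* validate the destination range before any fill is attempted */
	ssize_t (*is_dst_valid)(struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long len);
	/* mfill operations; a NULL entry means the mode is unsupported */
	ssize_t (*poison)(struct uffd_info *info);
	ssize_t (*cont)(struct uffd_info *info);
	ssize_t (*copy)(struct uffd_info *info);
	ssize_t (*zeropage)(struct uffd_info *info);
};

Under this reading, uffd_info_set_ops() returning -EINVAL on a NULL entry is what lets a backend (e.g. hugetlb, which does not support UFFDIO_ZEROPAGE) opt out of a mode without mode-specific checks in the core loop.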
+
+static inline ssize_t uffd_info_setup(struct uffd_info *info)
+{
+ ssize_t err;
+
+ info->uffd_ops = vma_get_uffd_ops(info->dst_vma);
+ /* Every uffd_ops implementation must provide these callbacks. */
+ WARN_ON_ONCE(!info->uffd_ops->increment);
+ WARN_ON_ONCE(!info->uffd_ops->is_dst_valid);
+
+ info->increment = info->uffd_ops->increment(info->dst_vma);
+ err = info->uffd_ops->is_dst_valid(info->dst_vma, info->dst_addr,
+ info->len);
+ if (err)
+ return err;
+
+ return 0;
+}
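To make the ->increment indirection concrete, a purely illustrative pair of implementations follows (hypothetical names, not from this patch): ordinary mappings would advance one base page per iteration, while hugetlb would advance by the VMA's huge page size:

/* Hypothetical sketches; vma_kernel_pagesize() is the only real API used. */
static unsigned long mfill_base_increment(struct vm_area_struct *dst_vma)
{
	return PAGE_SIZE;	/* anon/shmem fill one base page at a time */
}

static unsigned long mfill_hugetlb_increment(struct vm_area_struct *dst_vma)
{
	return vma_kernel_pagesize(dst_vma);	/* one huge page at a time */
}

With the step size owned by the backend, the copy loop in mfill_atomic() can advance info.src_addr and info.dst_addr uniformly regardless of the VMA type.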
static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
unsigned long dst_start,
if (err)
goto out_unlock;
- info.uffd_ops = vma_get_uffd_ops(info.dst_vma);
- WARN_ON_ONCE(!info.uffd_ops->increment);
- WARN_ON_ONCE(!info.uffd_ops->is_dst_valid);
-
- info.increment = info.uffd_ops->increment(info.dst_vma);
- err = info.uffd_ops->is_dst_valid(info.dst_vma, info.dst_addr,
- info.len);
+ err = uffd_info_setup(&info);
if (err)
goto out_unlock;
- /*
- * For shmem mappings, khugepaged is allowed to remove page
- * tables under us; pte_offset_map_lock() will deal with that.
- */
- if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
- info.op = info.uffd_ops->poison;
- } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
- info.op = info.uffd_ops->cont;
- } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
- info.op = info.uffd_ops->copy;
- /*
- * The normal page fault path for a shmem will invoke
- * the fault, fill the hole in the file and COW it right
- * away. The result generates plain anonymous memory. So
- * when we are asked to fill an hole in a MAP_PRIVATE
- * shmem mapping, we'll generate anonymous memory
- * directly without actually filling the hole. For the
- * MAP_PRIVATE case the robustness check only happens in
- * the pagetable (to verify it's still none) and not in
- * the radix tree.
- */
- } else {
- info.op = info.uffd_ops->zeropage;
- }
- err = -EINVAL;
- if (!info.op)
+ err = uffd_info_set_ops(&info, flags);
+ if (err)
goto out_unlock;
while (info.src_addr < src_start + len) {